patch
stringlengths 17
31.2k
| y
int64 1
1
| oldf
stringlengths 0
2.21M
| idx
int64 1
1
| id
int64 4.29k
68.4k
| msg
stringlengths 8
843
| proj
stringclasses 212
values | lang
stringclasses 9
values |
---|---|---|---|---|---|---|---|
@@ -222,13 +222,15 @@ class Order extends BaseAction implements EventSubscriberInterface
$pse = $cartItem->getProductSaleElements();
/* check still in stock */
- if ($cartItem->getQuantity() > $pse->getQuantity()) {
+ if ($cartItem->getQuantity() > $pse->getQuantity() && true === ConfigQuery::checkAvailableStock()) {
throw new TheliaProcessException("Not enough stock", TheliaProcessException::CART_ITEM_NOT_ENOUGH_STOCK, $cartItem);
}
/* decrease stock */
+ $newStock = $pse->getQuantity() - $cartItem->getQuantity();
+ if($newStock < 0) $newStock = 0; //Forbid negative stock
$pse->setQuantity(
- $pse->getQuantity() - $cartItem->getQuantity()
+ $newStock
);
$pse->save($con);
| 1 | <?php
/*************************************************************************************/
/* This file is part of the Thelia package. */
/* */
/* Copyright (c) OpenStudio */
/* email : [email protected] */
/* web : http://www.thelia.net */
/* */
/* For the full copyright and license information, please view the LICENSE.txt */
/* file that was distributed with this source code. */
/*************************************************************************************/
namespace Thelia\Action;
use Symfony\Component\EventDispatcher\EventDispatcherInterface;
use Symfony\Component\EventDispatcher\EventSubscriberInterface;
use Thelia\Cart\CartTrait;
use Thelia\Core\Event\Order\OrderAddressEvent;
use Thelia\Core\Event\Order\OrderEvent;
use Thelia\Core\Event\Order\OrderManualEvent;
use Thelia\Core\Event\Order\OrderPaymentEvent;
use Thelia\Core\Event\TheliaEvents;
use Thelia\Core\HttpFoundation\Request;
use Thelia\Core\Security\SecurityContext;
use Thelia\Core\Template\ParserInterface;
use Thelia\Exception\TheliaProcessException;
use Thelia\Mailer\MailerFactory;
use Thelia\Model\AddressQuery;
use Thelia\Model\Cart as CartModel;
use Thelia\Model\ConfigQuery;
use Thelia\Model\Currency as CurrencyModel;
use Thelia\Model\Customer as CustomerModel;
use Thelia\Model\Lang as LangModel;
use Thelia\Model\Map\OrderTableMap;
use Thelia\Model\MessageQuery;
use Thelia\Model\Order as ModelOrder;
use Thelia\Model\OrderAddress;
use Thelia\Model\OrderProduct;
use Thelia\Model\OrderProductAttributeCombination;
use Thelia\Model\OrderStatusQuery;
use Thelia\Tools\I18n;
/**
*
* Class Order
* @package Thelia\Action
* @author Etienne Roudeix <[email protected]>
*/
class Order extends BaseAction implements EventSubscriberInterface
{
    use CartTrait;

    /**
     * @var \Thelia\Core\HttpFoundation\Request
     */
    protected $request;

    /**
     * @var MailerFactory
     */
    protected $mailer;

    /**
     * @var ParserInterface
     */
    protected $parser;

    /**
     * @var SecurityContext
     */
    protected $securityContext;

    /**
     * @param Request         $request         the current HTTP request (gives access to the session)
     * @param ParserInterface $parser          template parser used to build the confirmation e-mail
     * @param MailerFactory   $mailer          mailer used to send the order confirmation
     * @param SecurityContext $securityContext gives access to the currently logged-in customer
     */
    public function __construct(Request $request, ParserInterface $parser, MailerFactory $mailer, SecurityContext $securityContext)
    {
        $this->request = $request;
        $this->parser = $parser;
        $this->mailer = $mailer;
        $this->securityContext = $securityContext;
    }

    /**
     * Store the chosen delivery address on the in-session order.
     *
     * @param \Thelia\Core\Event\Order\OrderEvent $event
     */
    public function setDeliveryAddress(OrderEvent $event)
    {
        $order = $event->getOrder();
        $order->setChoosenDeliveryAddress($event->getDeliveryAddress());
        $event->setOrder($order);
    }

    /**
     * Store the chosen delivery module on the in-session order.
     * If the module is removed (id <= 0), the postage cost is reset to zero.
     *
     * @param \Thelia\Core\Event\Order\OrderEvent $event
     */
    public function setDeliveryModule(OrderEvent $event)
    {
        $order = $event->getOrder();
        $deliveryModuleId = $event->getDeliveryModule();
        $order->setDeliveryModuleId($deliveryModuleId);

        // Reset postage cost if the delivery module had been removed
        if ($deliveryModuleId <= 0) {
            $order->setPostage(0);
        }

        $event->setOrder($order);
    }

    /**
     * Store the postage cost on the in-session order.
     *
     * @param \Thelia\Core\Event\Order\OrderEvent $event
     */
    public function setPostage(OrderEvent $event)
    {
        $order = $event->getOrder();
        $order->setPostage($event->getPostage());
        $event->setOrder($order);
    }

    /**
     * Store the chosen invoice address on the in-session order.
     *
     * @param \Thelia\Core\Event\Order\OrderEvent $event
     */
    public function setInvoiceAddress(OrderEvent $event)
    {
        $order = $event->getOrder();
        $order->setChoosenInvoiceAddress($event->getInvoiceAddress());
        $event->setOrder($order);
    }

    /**
     * Store the chosen payment module on the in-session order.
     *
     * @param \Thelia\Core\Event\Order\OrderEvent $event
     */
    public function setPaymentModule(OrderEvent $event)
    {
        $order = $event->getOrder();
        $order->setPaymentModuleId($event->getPaymentModule());
        $event->setOrder($order);
    }

    /**
     * Persist the in-session order: copy it, hard-save both addresses,
     * create one OrderProduct (with taxes and attribute combinations) per
     * cart item and decrease the product stock accordingly. The whole
     * operation runs inside a single database transaction, which is rolled
     * back if anything fails along the way.
     *
     * @param EventDispatcherInterface $dispatcher
     * @param ModelOrder               $sessionOrder order built in session
     * @param CurrencyModel            $currency     currency used for the order
     * @param LangModel                $lang         language used for i18n snapshots
     * @param CartModel                $cart         cart being turned into an order
     * @param CustomerModel            $customer     customer placing the order
     *
     * @throws TheliaProcessException if a cart item quantity exceeds the available
     *                                stock while stock checking is enabled
     *
     * @return ModelOrder the persisted order
     */
    protected function createOrder(EventDispatcherInterface $dispatcher, ModelOrder $sessionOrder, CurrencyModel $currency, LangModel $lang, CartModel $cart, CustomerModel $customer)
    {
        $con = \Propel\Runtime\Propel::getConnection(
            OrderTableMap::DATABASE_NAME
        );

        $con->beginTransaction();

        try {
            $placedOrder = $sessionOrder->copy();
            $placedOrder->setDispatcher($dispatcher);

            $deliveryAddress = AddressQuery::create()->findPk($sessionOrder->getChoosenDeliveryAddress());
            $taxCountry = $deliveryAddress->getCountry();
            $invoiceAddress = AddressQuery::create()->findPk($sessionOrder->getChoosenInvoiceAddress());
            $cartItems = $cart->getCartItems();

            /* fulfill order */
            $placedOrder->setCustomerId($customer->getId());
            $placedOrder->setCurrencyId($currency->getId());
            $placedOrder->setCurrencyRate($currency->getRate());
            $placedOrder->setLangId($lang->getId());

            /* hard save the delivery and invoice addresses, so the order keeps
               a snapshot even if the customer later edits his address book */
            $deliveryOrderAddress = new OrderAddress();
            $deliveryOrderAddress
                ->setCustomerTitleId($deliveryAddress->getTitleId())
                ->setCompany($deliveryAddress->getCompany())
                ->setFirstname($deliveryAddress->getFirstname())
                ->setLastname($deliveryAddress->getLastname())
                ->setAddress1($deliveryAddress->getAddress1())
                ->setAddress2($deliveryAddress->getAddress2())
                ->setAddress3($deliveryAddress->getAddress3())
                ->setZipcode($deliveryAddress->getZipcode())
                ->setCity($deliveryAddress->getCity())
                ->setPhone($deliveryAddress->getPhone())
                ->setCountryId($deliveryAddress->getCountryId())
                ->save($con)
            ;

            $invoiceOrderAddress = new OrderAddress();
            $invoiceOrderAddress
                ->setCustomerTitleId($invoiceAddress->getTitleId())
                ->setCompany($invoiceAddress->getCompany())
                ->setFirstname($invoiceAddress->getFirstname())
                ->setLastname($invoiceAddress->getLastname())
                ->setAddress1($invoiceAddress->getAddress1())
                ->setAddress2($invoiceAddress->getAddress2())
                ->setAddress3($invoiceAddress->getAddress3())
                ->setZipcode($invoiceAddress->getZipcode())
                ->setCity($invoiceAddress->getCity())
                ->setPhone($invoiceAddress->getPhone())
                ->setCountryId($invoiceAddress->getCountryId())
                ->save($con)
            ;

            $placedOrder->setDeliveryOrderAddressId($deliveryOrderAddress->getId());
            $placedOrder->setInvoiceOrderAddressId($invoiceOrderAddress->getId());

            $placedOrder->setStatusId(
                OrderStatusQuery::getNotPaidStatus()->getId()
            );

            /* memorize discount */
            $placedOrder->setDiscount(
                $cart->getDiscount()
            );

            $placedOrder->save($con);

            /* fulfill order_products and decrease stock */
            foreach ($cartItems as $cartItem) {
                $product = $cartItem->getProduct();

                /* get translation */
                $productI18n = I18n::forceI18nRetrieving($lang->getLocale(), 'Product', $product->getId());

                $pse = $cartItem->getProductSaleElements();

                /* check still in stock, unless stock checking is disabled by configuration */
                if ($cartItem->getQuantity() > $pse->getQuantity() && true === ConfigQuery::checkAvailableStock()) {
                    throw new TheliaProcessException("Not enough stock", TheliaProcessException::CART_ITEM_NOT_ENOUGH_STOCK, $cartItem);
                }

                /* decrease stock, forbidding a negative quantity (possible when
                   stock checking is disabled) */
                $newStock = $pse->getQuantity() - $cartItem->getQuantity();
                if ($newStock < 0) {
                    $newStock = 0;
                }
                $pse->setQuantity($newStock);
                $pse->save($con);

                /* get tax */
                $taxRuleI18n = I18n::forceI18nRetrieving($lang->getLocale(), 'TaxRule', $product->getTaxRuleId());

                $taxDetail = $product->getTaxRule()->getTaxDetail(
                    $product,
                    $taxCountry,
                    $cartItem->getPrice(),
                    $cartItem->getPromoPrice(),
                    $lang->getLocale()
                );

                $orderProduct = new OrderProduct();
                $orderProduct
                    ->setOrderId($placedOrder->getId())
                    ->setProductRef($product->getRef())
                    ->setProductSaleElementsRef($pse->getRef())
                    ->setTitle($productI18n->getTitle())
                    ->setChapo($productI18n->getChapo())
                    ->setDescription($productI18n->getDescription())
                    ->setPostscriptum($productI18n->getPostscriptum())
                    ->setQuantity($cartItem->getQuantity())
                    ->setPrice($cartItem->getPrice())
                    ->setPromoPrice($cartItem->getPromoPrice())
                    ->setWasNew($pse->getNewness())
                    ->setWasInPromo($cartItem->getPromo())
                    ->setWeight($pse->getWeight())
                    ->setTaxRuleTitle($taxRuleI18n->getTitle())
                    ->setTaxRuleDescription($taxRuleI18n->getDescription())
                    ->setEanCode($pse->getEanCode())
                    ->setCartIemId($cartItem->getId())
                    ->setDispatcher($dispatcher)
                    ->save($con)
                ;

                /* fulfill order_product_tax */
                foreach ($taxDetail as $tax) {
                    $tax->setOrderProductId($orderProduct->getId());
                    $tax->save($con);
                }

                /* fulfill order_attribute_combination and decrease stock */
                foreach ($pse->getAttributeCombinations() as $attributeCombination) {
                    $attribute = I18n::forceI18nRetrieving($lang->getLocale(), 'Attribute', $attributeCombination->getAttributeId());
                    $attributeAv = I18n::forceI18nRetrieving($lang->getLocale(), 'AttributeAv', $attributeCombination->getAttributeAvId());

                    $orderAttributeCombination = new OrderProductAttributeCombination();
                    $orderAttributeCombination
                        ->setOrderProductId($orderProduct->getId())
                        ->setAttributeTitle($attribute->getTitle())
                        ->setAttributeChapo($attribute->getChapo())
                        ->setAttributeDescription($attribute->getDescription())
                        ->setAttributePostscriptum($attribute->getPostscriptum())
                        ->setAttributeAvTitle($attributeAv->getTitle())
                        ->setAttributeAvChapo($attributeAv->getChapo())
                        ->setAttributeAvDescription($attributeAv->getDescription())
                        ->setAttributeAvPostscriptum($attributeAv->getPostscriptum())
                        ->save($con);
                }
            }

            $con->commit();
        } catch (\Exception $e) {
            // Leave the database untouched (stock included) if anything failed.
            $con->rollBack();
            throw $e;
        }

        return $placedOrder;
    }

    /**
     * Create an order outside of the front-office context, e.g. manually from the back-office.
     *
     * @param OrderManualEvent $event
     */
    public function createManual(OrderManualEvent $event)
    {
        $event->setPlacedOrder(
            $this->createOrder(
                $event->getDispatcher(),
                $event->getOrder(),
                $event->getCurrency(),
                $event->getLang(),
                $event->getCart(),
                $event->getCustomer()
            )
        );

        // Reset the session order once the manual order has been placed.
        $event->setOrder(new \Thelia\Model\Order());
    }

    /**
     * Persist the order from the current session, then trigger the
     * ORDER_BEFORE_PAYMENT and MODULE_PAY events.
     *
     * @param OrderEvent $event
     *
     * @throws \Thelia\Exception\TheliaProcessException
     */
    public function create(OrderEvent $event)
    {
        $session = $this->getSession();

        $placedOrder = $this->createOrder(
            $event->getDispatcher(),
            $event->getOrder(),
            $session->getCurrency(),
            $session->getLang(),
            $session->getCart(),
            $this->securityContext->getCustomerUser()
        );

        $event->getDispatcher()->dispatch(TheliaEvents::ORDER_BEFORE_PAYMENT, new OrderEvent($placedOrder));

        /* reset the session order, but memorize placed order */
        $event->setOrder(new \Thelia\Model\Order());
        $event->setPlacedOrder($placedOrder);

        /* empty cart */
        $dispatcher = $event->getDispatcher();

        /* call pay method */
        $payEvent = new OrderPaymentEvent($placedOrder);

        $dispatcher->dispatch(TheliaEvents::MODULE_PAY, $payEvent);

        if ($payEvent->hasResponse()) {
            $event->setResponse($payEvent->getResponse());
        }
    }

    /**
     * Send the order confirmation e-mail to the customer, if a store e-mail
     * address is configured.
     *
     * @param \Thelia\Core\Event\Order\OrderEvent $event
     *
     * @throws \Exception if the 'order_confirmation' message cannot be loaded
     */
    public function sendOrderEmail(OrderEvent $event)
    {
        $contact_email = ConfigQuery::read('store_email');

        if ($contact_email) {
            $message = MessageQuery::create()
                ->filterByName('order_confirmation')
                ->findOne();

            // Propel's findOne() returns null (not false) when no row matches.
            if (null === $message) {
                throw new \Exception("Failed to load message 'order_confirmation'.");
            }

            $order = $event->getOrder();
            $customer = $order->getCustomer();

            $this->parser->assign('order_id', $order->getId());
            $this->parser->assign('order_ref', $order->getRef());

            $message
                ->setLocale($order->getLang()->getLocale());

            $instance = \Swift_Message::newInstance()
                ->addTo($customer->getEmail(), $customer->getFirstname()." ".$customer->getLastname())
                ->addFrom($contact_email, ConfigQuery::read('store_name'))
            ;

            // Build subject and body
            $message->buildMessage($this->parser, $instance);

            $this->getMailer()->send($instance);
        }
    }

    /**
     * Return an instance of \Swift_Mailer with the configured transport.
     *
     * @return \Swift_Mailer
     */
    public function getMailer()
    {
        return $this->mailer->getSwiftMailer();
    }

    /**
     * Update the order status and persist it.
     *
     * @param OrderEvent $event
     */
    public function updateStatus(OrderEvent $event)
    {
        $order = $event->getOrder();
        $order->setStatusId($event->getStatus());
        $order->save();
        $event->setOrder($order);
    }

    /**
     * Update the order delivery reference and persist it.
     *
     * @param OrderEvent $event
     */
    public function updateDeliveryRef(OrderEvent $event)
    {
        $order = $event->getOrder();
        $order->setDeliveryRef($event->getDeliveryRef());
        $order->save();
        $event->setOrder($order);
    }

    /**
     * Update an order address snapshot from the event values and persist it.
     *
     * @param OrderAddressEvent $event
     */
    public function updateAddress(OrderAddressEvent $event)
    {
        $orderAddress = $event->getOrderAddress();
        $orderAddress
            ->setCustomerTitleId($event->getTitle())
            ->setCompany($event->getCompany())
            ->setFirstname($event->getFirstname())
            ->setLastname($event->getLastname())
            ->setAddress1($event->getAddress1())
            ->setAddress2($event->getAddress2())
            ->setAddress3($event->getAddress3())
            ->setZipcode($event->getZipcode())
            ->setCity($event->getCity())
            ->setCountryId($event->getCountry())
            ->setPhone($event->getPhone())
        ;
        $orderAddress->save();
        $event->setOrderAddress($orderAddress);
    }

    /**
     * Returns an array of event names this subscriber wants to listen to.
     *
     * The array keys are event names and the value can be:
     *
     *  * The method name to call (priority defaults to 0)
     *  * An array composed of the method name to call and the priority
     *  * An array of arrays composed of the method names to call and respective
     *    priorities, or 0 if unset
     *
     * For instance:
     *
     *  * array('eventName' => 'methodName')
     *  * array('eventName' => array('methodName', $priority))
     *  * array('eventName' => array(array('methodName1', $priority), array('methodName2'))
     *
     * @return array The event names to listen to
     *
     * @api
     */
    public static function getSubscribedEvents()
    {
        return array(
            TheliaEvents::ORDER_SET_DELIVERY_ADDRESS => array("setDeliveryAddress", 128),
            TheliaEvents::ORDER_SET_DELIVERY_MODULE => array("setDeliveryModule", 128),
            TheliaEvents::ORDER_SET_POSTAGE => array("setPostage", 128),
            TheliaEvents::ORDER_SET_INVOICE_ADDRESS => array("setInvoiceAddress", 128),
            TheliaEvents::ORDER_SET_PAYMENT_MODULE => array("setPaymentModule", 128),
            TheliaEvents::ORDER_PAY => array("create", 128),
            TheliaEvents::ORDER_BEFORE_PAYMENT => array("sendOrderEmail", 128),
            TheliaEvents::ORDER_UPDATE_STATUS => array("updateStatus", 128),
            TheliaEvents::ORDER_UPDATE_DELIVERY_REF => array("updateDeliveryRef", 128),
            TheliaEvents::ORDER_UPDATE_ADDRESS => array("updateAddress", 128),
            TheliaEvents::ORDER_CREATE_MANUAL => array("createManual", 128),
        );
    }

    /**
     * Returns the session from the current request
     *
     * @return \Thelia\Core\HttpFoundation\Session\Session
     */
    protected function getSession()
    {
        return $this->request->getSession();
    }
}
| 1 | 10,360 | variable must be in camelCase => $newStock | thelia-thelia | php |
@@ -73,7 +73,7 @@ func setActionFlags(cmds ...*cobra.Command) {
func sendAction(elp action.Envelope) string {
fmt.Printf("Enter password #%s:\n", signer)
- bytePassword, err := terminal.ReadPassword(syscall.Stdin)
+ bytePassword, err := terminal.ReadPassword(int(syscall.Stdin))
if err != nil {
log.L().Error("fail to get password", zap.Error(err))
return err.Error() | 1 | // Copyright (c) 2019 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package action
import (
"context"
"encoding/hex"
"fmt"
"syscall"
"github.com/golang/protobuf/proto"
"github.com/spf13/cobra"
"go.uber.org/zap"
"golang.org/x/crypto/ssh/terminal"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/cli/ioctl/cmd/account"
"github.com/iotexproject/iotex-core/cli/ioctl/util"
"github.com/iotexproject/iotex-core/pkg/hash"
"github.com/iotexproject/iotex-core/pkg/keypair"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/pkg/util/byteutil"
"github.com/iotexproject/iotex-core/protogen/iotexapi"
"github.com/iotexproject/iotex-core/protogen/iotextypes"
)
// Flags
// Package-level flag targets shared by all action sub-commands; they are
// populated by setActionFlags and read by sendAction when signing and
// submitting the envelope.
var (
	alias    string
	gasLimit uint64
	gasPrice string
	nonce    uint64
	signer   string
	bytecode []byte
)

// ActionCmd represents the account command
// (parent command that groups hash/transfer/deploy/invoke/claim; requires at
// least one sub-command argument)
var ActionCmd = &cobra.Command{
	Use:   "action",
	Short: "Deal with actions of IoTeX blockchain",
	Args:  cobra.MinimumNArgs(1),
}
func init() {
ActionCmd.AddCommand(actionHashCmd)
ActionCmd.AddCommand(actionTransferCmd)
ActionCmd.AddCommand(actionDeployCmd)
ActionCmd.AddCommand(actionInvokeCmd)
ActionCmd.AddCommand(actionClaimCmd)
setActionFlags(actionTransferCmd, actionDeployCmd, actionInvokeCmd, actionClaimCmd)
}
// setActionFlags attaches the common action flags (gas limit, gas price,
// signer, nonce) to each given command and marks the mandatory ones as
// required. Deploy and invoke additionally get a hex-encoded bytecode flag.
func setActionFlags(cmds ...*cobra.Command) {
	for _, cmd := range cmds {
		cmd.Flags().Uint64VarP(&gasLimit, "gas-limit", "l", 0, "set gas limit")
		cmd.Flags().StringVarP(&gasPrice, "gas-price", "p", "",
			"set gas price (unit: 10^(-6)Iotx)")
		cmd.Flags().StringVarP(&signer, "signer", "s", "", "choose a signing key")
		cmd.Flags().Uint64VarP(&nonce, "nonce", "n", 0, "set nonce")
		cmd.MarkFlagRequired("gas-limit")
		cmd.MarkFlagRequired("gas-price")
		cmd.MarkFlagRequired("signer")
		if cmd == actionDeployCmd || cmd == actionInvokeCmd {
			cmd.Flags().BytesHexVarP(&bytecode, "bytecode", "b", nil, "set the byte code")
			// NOTE(review): this always marks the flag on actionInvokeCmd, even
			// during the actionDeployCmd iteration, so deploy's bytecode is not
			// required — confirm whether `cmd.MarkFlagRequired("bytecode")` was
			// intended here.
			actionInvokeCmd.MarkFlagRequired("bytecode")
		}
	}
}
// sendAction prompts for the signer's password, signs the given envelope,
// asks the user for interactive confirmation, and submits the signed action
// to the configured API endpoint. It returns either an error message or a
// success message containing the action hash.
func sendAction(elp action.Envelope) string {
	fmt.Printf("Enter password #%s:\n", signer)
	// syscall.Stdin is an untyped int constant on Unix but a Handle (uintptr)
	// on Windows; the explicit conversion keeps this call portable.
	bytePassword, err := terminal.ReadPassword(int(syscall.Stdin))
	if err != nil {
		log.L().Error("fail to get password", zap.Error(err))
		return err.Error()
	}
	password := string(bytePassword)

	// Sign the envelope hash and recover the public key from the signature.
	ehash := elp.Hash()
	sig, err := account.Sign(signer, password, ehash[:])
	if err != nil {
		log.L().Error("fail to sign", zap.Error(err))
		return err.Error()
	}
	pubKey, err := keypair.SigToPublicKey(ehash[:], sig)
	if err != nil {
		log.L().Error("fail to get public key", zap.Error(err))
		return err.Error()
	}
	selp := &iotextypes.Action{
		Core:         elp.Proto(),
		SenderPubKey: pubKey.Bytes(),
		Signature:    sig,
	}

	// Show the action and require explicit confirmation before broadcasting.
	var confirm string
	actionInfo, err := printActionProto(selp)
	if err != nil {
		return err.Error()
	}
	fmt.Println("\n" + actionInfo + "\n" +
		"Please confirm your action.\n" +
		"Type 'YES' to continue, quit for anything else.")
	fmt.Scanf("%s", &confirm)
	if confirm != "YES" && confirm != "yes" {
		return "Quit"
	}
	fmt.Println()

	// Submit the signed action to the API endpoint.
	request := &iotexapi.SendActionRequest{Action: selp}
	conn, err := util.ConnectToEndpoint()
	if err != nil {
		return err.Error()
	}
	defer conn.Close()
	cli := iotexapi.NewAPIServiceClient(conn)
	ctx := context.Background()

	_, err = cli.SendAction(ctx, request)
	if err != nil {
		log.L().Error("server error", zap.Error(err))
		return err.Error()
	}
	shash := hash.Hash256b(byteutil.Must(proto.Marshal(selp)))
	return "Action has been sent to blockchain.\n" +
		"Wait for several seconds and query this action by hash:\n" +
		hex.EncodeToString(shash[:])
}
| 1 | 16,472 | unnecessary conversion (from `unconvert`) | iotexproject-iotex-core | go |
@@ -345,6 +345,18 @@ final class BitMappedTrie<T> implements Serializable {
return index;
}
+ Object[] toArray() {
+ final Object[] results = new Object[length];
+ visit((index, leaf, start, end) -> {
+ final int copied = end - start;
+ for (int i = start; i < end; i++) {
+ results[index + i] = type.getAt(leaf, i);
+ }
+ return index + copied;
+ });
+ return results;
+ }
+
int length() { return length; }
}
| 1 | /* / \____ _ _ ____ ______ / \ ____ __ _______
* / / \/ \ / \/ \ / /\__\/ // \/ \ // /\__\ JΛVΛSLΛNG
* _/ / /\ \ \/ / /\ \\__\\ \ // /\ \ /\\/ \ /__\ \ Copyright 2014-2017 Javaslang, http://javaslang.io
* /___/\_/ \_/\____/\_/ \_/\__\/__/\__\_/ \_// \__/\_____/ Licensed under the Apache License, Version 2.0
*/
package javaslang.collection;
import java.io.Serializable;
import java.util.function.Function;
import java.util.function.Predicate;
import static java.util.function.Function.identity;
import static javaslang.collection.ArrayType.obj;
import static javaslang.collection.Collections.withSize;
import static javaslang.collection.NodeModifier.COPY_NODE;
import static javaslang.collection.NodeModifier.IDENTITY;
/**
* A `bit-mapped trie` is a very wide and shallow tree (for integer indices the depth will be `≤6`).
* Each node has a maximum of `32` children (configurable).
* Access to a given position is done by converting the index to a base 32 number and using each digit to descend down the tree.
* Modifying the tree is done similarly, but along the way the path is copied, returning a new root every time.
* `Append` inserts in the last leaf, or if the tree is full from the right, it adds another layer on top of it (the old root will be the first of the new one).
* `Prepend` is done similarly, but an offset is needed, because adding a new top node (where the current root would be the last node of the new root)
* shifts the indices by half of the current tree's full size. The `offset` shifts them back to the correct index.
* `Slice` is done by trimming the path from the root and discarding any `leading`/`trailing` values in effectively constant time (without memory leak, as in `Java`/`Clojure`).
*
* @author Pap Lőrinc
* @since 2.1.0
*/
final class BitMappedTrie<T> implements Serializable {
    static final int BRANCHING_BASE = 5;
    static final int BRANCHING_FACTOR = 1 << BRANCHING_BASE;
    static final int BRANCHING_MASK = -1 >>> -BRANCHING_BASE;
    static int firstDigit(int num, int depthShift) { return num >> depthShift; }
    static int digit(int num, int depthShift) { return lastDigit(firstDigit(num, depthShift)); }
    static int lastDigit(int num) { return num & BRANCHING_MASK; }

    private static final long serialVersionUID = 1L;

    private static final BitMappedTrie<?> EMPTY = new BitMappedTrie<>(obj(), obj().empty(), 0, 0, 0);
    @SuppressWarnings("unchecked")
    static <T> BitMappedTrie<T> empty() { return (BitMappedTrie<T>) EMPTY; }

    final ArrayType<T> type;
    private final Object array;
    private final int offset, length;
    private final int depthShift;

    private BitMappedTrie(ArrayType<T> type, Object array, int offset, int length, int depthShift) {
        this.type = type;
        this.array = array;
        this.offset = offset;
        this.length = length;
        this.depthShift = depthShift;
    }

    private static int treeSize(int branchCount, int depthShift) {
        final int fullBranchSize = 1 << depthShift;
        return branchCount * fullBranchSize;
    }

    static <T> BitMappedTrie<T> ofAll(Object array) {
        final ArrayType<T> type = ArrayType.of(array);
        final int size = type.lengthOf(array);
        return (size == 0) ? empty() : ofAll(array, type, size);
    }
    private static <T> BitMappedTrie<T> ofAll(Object array, ArrayType<T> type, int size) {
        int shift = 0;
        for (ArrayType<T> t = type; t.lengthOf(array) > BRANCHING_FACTOR; shift += BRANCHING_BASE) {
            array = t.grouped(array, BRANCHING_FACTOR);
            t = obj();
        }
        return new BitMappedTrie<>(type, array, 0, size, shift);
    }

    private BitMappedTrie<T> boxed() { return map(identity()); }

    BitMappedTrie<T> prependAll(Iterable<? extends T> iterable) {
        final Collections.IterableWithSize<? extends T> iter = withSize(iterable);
        try {
            return prepend(iter.reverseIterator(), iter.size());
        } catch (ClassCastException ignored) {
            return boxed().prepend(iter.reverseIterator(), iter.size()); /* given the elements are boxed */
        }
    }
    private BitMappedTrie<T> prepend(java.util.Iterator<? extends T> iterator, int size) {
        BitMappedTrie<T> result = this;
        while (size > 0) {
            Object array = result.array;
            int shift = result.depthShift, offset = result.offset;
            if (result.isFullLeft()) {
                array = obj().copyUpdate(obj().empty(), BRANCHING_FACTOR - 1, array);
                shift += BRANCHING_BASE;
                offset = treeSize(BRANCHING_FACTOR - 1, shift);
            }

            final int index = offset - 1;
            final int delta = Math.min(size, lastDigit(index) + 1);
            size -= delta;

            array = result.modify(array, shift, index, COPY_NODE, prependToLeaf(iterator));
            result = new BitMappedTrie<>(type, array, offset - delta, result.length + delta, shift);
        }
        return result;
    }
    private boolean isFullLeft() { return offset == 0; }
    private NodeModifier prependToLeaf(java.util.Iterator<? extends T> iterator) {
        return (array, index) -> {
            final Object copy = type.copy(array, BRANCHING_FACTOR);
            while (iterator.hasNext() && index >= 0) {
                type.setAt(copy, index--, iterator.next());
            }
            return copy;
        };
    }

    BitMappedTrie<T> appendAll(Iterable<? extends T> iterable) {
        final Collections.IterableWithSize<? extends T> iter = withSize(iterable);
        try {
            return append(iter.iterator(), iter.size());
        } catch (ClassCastException ignored) {
            return boxed().append(iter.iterator(), iter.size()); /* given the elements are boxed */
        }
    }
    private BitMappedTrie<T> append(java.util.Iterator<? extends T> iterator, int size) {
        BitMappedTrie<T> result = this;
        while (size > 0) {
            Object array = result.array;
            int shift = result.depthShift;
            if (result.isFullRight()) {
                array = obj().asArray(array);
                shift += BRANCHING_BASE;
            }

            final int index = offset + result.length;
            final int leafSpace = lastDigit(index);
            final int delta = Math.min(size, BRANCHING_FACTOR - leafSpace);
            size -= delta;

            array = result.modify(array, shift, index, COPY_NODE, appendToLeaf(iterator, leafSpace + delta));
            result = new BitMappedTrie<>(type, array, offset, result.length + delta, shift);
        }
        return result;
    }
    private boolean isFullRight() { return (offset + length + 1) > treeSize(BRANCHING_FACTOR, depthShift); }
    private NodeModifier appendToLeaf(java.util.Iterator<? extends T> iterator, int leafSize) {
        return (array, index) -> {
            final Object copy = type.copy(array, leafSize);
            while (iterator.hasNext() && index < leafSize) {
                type.setAt(copy, index++, iterator.next());
            }
            return copy;
        };
    }

    BitMappedTrie<T> update(int index, T element) {
        try {
            final Object root = modify(array, depthShift, offset + index, COPY_NODE, updateLeafWith(type, element));
            return new BitMappedTrie<>(type, root, offset, length, depthShift);
        } catch (ClassCastException ignored) {
            return boxed().update(index, element); /* given the element is boxed */
        }
    }
    private NodeModifier updateLeafWith(ArrayType<T> type, T element) { return (a, i) -> type.copyUpdate(a, i, element); }

    BitMappedTrie<T> drop(int n) {
        if (n <= 0) {
            return this;
        } else if (n >= length) {
            return empty();
        } else {
            final int index = offset + n;
            final Object root = arePointingToSameLeaf(0, n)
                                ? array
                                : modify(array, depthShift, index, obj()::copyDrop, IDENTITY);
            return collapsed(type, root, index, length - n, depthShift);
        }
    }

    BitMappedTrie<T> take(int n) {
        if (n >= length) {
            return this;
        } else if (n <= 0) {
            return empty();
        } else {
            final int index = n - 1;
            final Object root = arePointingToSameLeaf(index, length - 1)
                                ? array
                                : modify(array, depthShift, offset + index, obj()::copyTake, IDENTITY);
            return collapsed(type, root, offset, n, depthShift);
        }
    }

    private boolean arePointingToSameLeaf(int i, int j) {
        return firstDigit(offset + i, BRANCHING_BASE) == firstDigit(offset + j, BRANCHING_BASE);
    }

    /* drop root node while it has a single element */
    private static <T> BitMappedTrie<T> collapsed(ArrayType<T> type, Object array, int offset, int length, int shift) {
        for (; shift > 0; shift -= BRANCHING_BASE) {
            final int skippedElements = obj().lengthOf(array) - 1;
            if (skippedElements != digit(offset, shift)) {
                break;
            }
            array = obj().getAt(array, skippedElements);
            offset -= treeSize(skippedElements, shift);
        }
        return new BitMappedTrie<>(type, array, offset, length, shift);
    }

    /* descend the tree from root to leaf, applying the given modifications along the way, returning the new root */
    private Object modify(Object root, int depthShift, int index, NodeModifier node, NodeModifier leaf) {
        return (depthShift == 0)
               ? leaf.apply(root, index)
               : modifyNonLeaf(root, depthShift, index, node, leaf);
    }
    private Object modifyNonLeaf(Object root, int depthShift, int index, NodeModifier node, NodeModifier leaf) {
        int previousIndex = firstDigit(index, depthShift);
        root = node.apply(root, previousIndex);

        Object array = root;
        for (int shift = depthShift - BRANCHING_BASE; shift >= BRANCHING_BASE; shift -= BRANCHING_BASE) {
            final int prev = previousIndex;
            previousIndex = digit(index, shift);
            array = setNewNode(node, prev, array, previousIndex);
        }

        final Object newLeaf = leaf.apply(obj().getAt(array, previousIndex), lastDigit(index));
        obj().setAt(array, previousIndex, newLeaf);
        return root;
    }
    private Object setNewNode(NodeModifier node, int previousIndex, Object array, int offset) {
        final Object previous = obj().getAt(array, previousIndex);
        final Object newNode = node.apply(previous, offset);
        obj().setAt(array, previousIndex, newNode);
        return newNode;
    }

    T get(int index) {
        final Object leaf = getLeaf(index);
        final int leafIndex = lastDigit(offset + index);
        return type.getAt(leaf, leafIndex);
    }

    /**
     * fetch the leaf, corresponding to the given index.
     * Node: the offset and length should be taken into consideration as there may be leading and trailing garbage.
     * Also, the returned array is mutable, but should not be mutated!
     */
    @SuppressWarnings("WeakerAccess")
    Object getLeaf(int index) {
        if (depthShift == 0) {
            return array;
        } else {
            return getLeafGeneral(index);
        }
    }
    private Object getLeafGeneral(int index) {
        index += offset;
        Object leaf = obj().getAt(array, firstDigit(index, depthShift));
        for (int shift = depthShift - BRANCHING_BASE; shift > 0; shift -= BRANCHING_BASE) {
            leaf = obj().getAt(leaf, digit(index, shift));
        }
        return leaf;
    }

    Iterator<T> iterator() {
        return new Iterator<T>() {
            private final int globalLength = BitMappedTrie.this.length;
            private int globalIndex = 0;

            private int index = lastDigit(offset);
            private Object leaf = getLeaf(globalIndex);
            private int length = type.lengthOf(leaf);

            @Override
            public boolean hasNext() { return globalIndex < globalLength; }

            @Override
            public T next() {
                if (index == length) { setCurrentArray(); }
                final T next = type.getAt(leaf, index);

                index++;
                globalIndex++;

                return next;
            }

            private void setCurrentArray() {
                index = 0;
                leaf = getLeaf(globalIndex);
                length = type.lengthOf(leaf);
            }
        };
    }

    @SuppressWarnings("unchecked")
    <T2> int visit(LeafVisitor<T2> visitor) {
        int globalIndex = 0, start = lastDigit(offset);
        for (int index = 0; index < length; ) {
            final T2 leaf = (T2) getLeaf(index);
            final int end = getMin(start, index, leaf);

            globalIndex = visitor.visit(globalIndex, leaf, start, end);
            index += end - start;
            start = 0;
        }
        return globalIndex;
    }
    private int getMin(int start, int index, Object leaf) { return Math.min(type.lengthOf(leaf), start + length - index); }

    BitMappedTrie<T> filter(Predicate<? super T> predicate) {
        final Object results = type.newInstance(length());
        final int length = this.<T> visit((index, leaf, start, end) -> filter(predicate, results, index, leaf, start, end));
        return (this.length == length)
               ? this
               : BitMappedTrie.ofAll(type.copyRange(results, 0, length));
    }
    private int filter(Predicate<? super T> predicate, Object results, int index, T leaf, int start, int end) {
        for (int i = start; i < end; i++) {
            final T value = type.getAt(leaf, i);
            if (predicate.test(value)) {
                type.setAt(results, index++, value);
            }
        }
        return index;
    }

    <U> BitMappedTrie<U> map(Function<? super T, ? extends U> mapper) {
        final Object results = obj().newInstance(length);
        this.<T> visit((index, leaf, start, end) -> map(mapper, results, index, leaf, start, end));
        return BitMappedTrie.ofAll(results);
    }
    private <U> int map(Function<? super T, ? extends U> mapper, Object results, int index, T leaf, int start, int end) {
        for (int i = start; i < end; i++) {
            obj().setAt(results, index++, mapper.apply(type.getAt(leaf, i)));
        }
        return index;
    }

    /**
     * Copies every element (honoring the offset/length trimming) into a fresh
     * {@code Object[]} in order. A running cursor is used so elements land at
     * the correct position even when the first leaf starts at a non-zero
     * {@code start} index.
     */
    Object[] toArray() {
        final Object[] results = new Object[length];
        this.<T> visit((index, leaf, start, end) -> {
            for (int i = start; i < end; i++) {
                results[index++] = type.getAt(leaf, i);
            }
            return index;
        });
        return results;
    }

    int length() { return length; }
}
/**
 * Modifier applied to a node while descending the trie: given a node array and
 * a child index, it returns the (possibly copied) node to continue with.
 * {@code COPY_NODE} copies the node up to and including the given index;
 * {@code IDENTITY} returns the node unchanged.
 */
@FunctionalInterface
interface NodeModifier {
    Object apply(Object array, int index);

    NodeModifier COPY_NODE = (o, i) -> obj().copy(o, i + 1);
    NodeModifier IDENTITY = (o, i) -> o;
}
/**
 * Callback invoked by {@code BitMappedTrie.visit} once per leaf with the
 * running global index, the leaf array, and the [start, end) range of valid
 * elements inside it; returns the updated global index.
 */
@FunctionalInterface
interface LeafVisitor<T> {
    int visit(int index, T leaf, int start, int end);
}
| 1 | 11,789 | ... is probably automatically vectorized, since it has the same speed as `System.arraycopy` | vavr-io-vavr | java |
@@ -15,9 +15,19 @@
package engine
+import (
+ "github.com/aws/amazon-ecs-agent/agent/resources/cgroup"
+ "github.com/cihub/seelog"
+ "github.com/pkg/errors"
+)
+
+// control is used to manipulate cgroups and ease testing
+var control cgroup.Control
+
// SetupPlatformResources sets up platform level resources
func (mtask *managedTask) SetupPlatformResources() error {
if mtask.Task.CgroupEnabled() {
+ setControl(cgroup.New())
return mtask.setupCgroup()
}
return nil | 1 | // +build linux
// Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package engine
// SetupPlatformResources sets up platform level resources
func (mtask *managedTask) SetupPlatformResources() error {
if mtask.Task.CgroupEnabled() {
return mtask.setupCgroup()
}
return nil
}
// CleanupPlatformResources cleans up platform level resources
func (mtask *managedTask) CleanupPlatformResources() error {
if mtask.Task.CgroupEnabled() {
return mtask.cleanupCgroup()
}
return nil
}
// setupCgroup sets up the cgroup for each managed task
func (mtask *managedTask) setupCgroup() error {
// Stub for now
return nil
}
// cleanupCgroup removes the task cgroup
func (mtask *managedTask) cleanupCgroup() error {
// Stub for now
return nil
}
| 1 | 17,057 | Is there anyway to get rid of the global variable, if it is specific for each task, can you add it to the `mtask` struct? | aws-amazon-ecs-agent | go |
@@ -77,12 +77,12 @@ func TestDeserializeFromFlatBuffer(t *testing.T) {
{
name: "simple unary expr",
fbFn: getUnaryOpFlatBuffer,
- polyType: `forall [] float`,
+ polyType: `float`,
},
{
name: "function expression",
fbFn: getFnExprFlatBuffer,
- polyType: `forall [t0, t1] (a: t0, <-b: t1, ?c: int) -> int`,
+ polyType: `(a: A, <-b: B, ?c: int) => int`,
},
}
| 1 | package semantic_test
import (
"errors"
"fmt"
"regexp"
"strconv"
"strings"
"testing"
flatbuffers "github.com/google/flatbuffers/go"
"github.com/google/go-cmp/cmp"
"github.com/influxdata/flux/ast"
"github.com/influxdata/flux/internal/fbsemantic"
"github.com/influxdata/flux/parser"
"github.com/influxdata/flux/runtime"
"github.com/influxdata/flux/semantic"
)
var cmpOpts = []cmp.Option{
cmp.AllowUnexported(
semantic.ArrayExpression{},
semantic.BinaryExpression{},
semantic.Block{},
semantic.CallExpression{},
semantic.ConditionalExpression{},
semantic.DateTimeLiteral{},
semantic.DurationLiteral{},
semantic.ExpressionStatement{},
semantic.File{},
semantic.FloatLiteral{},
semantic.FunctionExpression{},
semantic.FunctionParameters{},
semantic.FunctionParameter{},
semantic.IdentifierExpression{},
semantic.Identifier{},
semantic.ImportDeclaration{},
semantic.IndexExpression{},
semantic.IntegerLiteral{},
semantic.InterpolatedPart{},
semantic.LogicalExpression{},
semantic.MemberAssignment{},
semantic.MemberExpression{},
semantic.NativeVariableAssignment{},
semantic.ObjectExpression{},
semantic.OptionStatement{},
semantic.Package{},
semantic.PackageClause{},
semantic.RegexpLiteral{},
semantic.Property{},
semantic.ReturnStatement{},
semantic.StringExpression{},
semantic.StringLiteral{},
semantic.TestStatement{},
semantic.TextPart{},
semantic.UnaryExpression{},
),
cmp.Transformer("regexp", func(re *regexp.Regexp) string {
return re.String()
}),
// Just ignore types when comparing against Go semantic graph, since
// Go does not annotate expressions nodes with types directly.
cmp.Transformer("", func(ty semantic.MonoType) int {
return 0
}),
cmp.Transformer("", func(ty semantic.PolyType) int {
return 0
}),
}
func TestDeserializeFromFlatBuffer(t *testing.T) {
tcs := []struct {
name string
fbFn func() (*semantic.Package, []byte)
polyType string
}{
{
name: "simple unary expr",
fbFn: getUnaryOpFlatBuffer,
polyType: `forall [] float`,
},
{
name: "function expression",
fbFn: getFnExprFlatBuffer,
polyType: `forall [t0, t1] (a: t0, <-b: t1, ?c: int) -> int`,
},
}
for _, tc := range tcs {
tc := tc
t.Run(tc.name, func(t *testing.T) {
want, fb := tc.fbFn()
got, err := semantic.DeserializeFromFlatBuffer(fb)
if err != nil {
t.Fatal(err)
}
if diff := cmp.Diff(want, got, cmpOpts...); diff != "" {
t.Fatalf("unexpected semantic graph: -want/+got:\n%v", diff)
}
// Make sure the polytype looks as expected
pt := got.Files[0].Body[0].(*semantic.NativeVariableAssignment).Typ
if diff := cmp.Diff(tc.polyType, pt.String()); diff != "" {
t.Fatalf("unexpected polytype: -want/+got:\n%v", diff)
}
})
}
}
func getUnaryOpFlatBuffer() (*semantic.Package, []byte) {
src := `x = -3.5`
b := flatbuffers.NewBuilder(256)
// let's test out a unary expression using a float
litLoc := getFBLoc(b, "1:6", "1:9", src)
fty := getFBBasicType(b, fbsemantic.TypeFloat)
fbsemantic.FloatLiteralStart(b)
fbsemantic.FloatLiteralAddLoc(b, litLoc)
fbsemantic.FloatLiteralAddValue(b, 3.5)
floatval := fbsemantic.FloatLiteralEnd(b)
exprLoc := getFBLoc(b, "1:5", "1:9", src)
fbsemantic.UnaryExpressionStart(b)
fbsemantic.UnaryExpressionAddLoc(b, exprLoc)
fbsemantic.UnaryExpressionAddTypType(b, fbsemantic.MonoTypeBasic)
fbsemantic.UnaryExpressionAddTyp(b, fty)
fbsemantic.UnaryExpressionAddOperator(b, fbsemantic.OperatorSubtractionOperator)
fbsemantic.UnaryExpressionAddArgumentType(b, fbsemantic.ExpressionFloatLiteral)
fbsemantic.UnaryExpressionAddArgument(b, floatval)
negate := fbsemantic.UnaryExpressionEnd(b)
str := b.CreateString("x")
idLoc := getFBLoc(b, "1:1", "1:2", src)
fbsemantic.IdentifierStart(b)
fbsemantic.IdentifierAddLoc(b, idLoc)
fbsemantic.IdentifierAddName(b, str)
id := fbsemantic.IdentifierEnd(b)
asnLoc := getFBLoc(b, "1:1", "1:9", src)
ty := getFBPolyType(b, fty)
fbsemantic.NativeVariableAssignmentStart(b)
fbsemantic.NativeVariableAssignmentAddLoc(b, asnLoc)
fbsemantic.NativeVariableAssignmentAddTyp(b, ty)
fbsemantic.NativeVariableAssignmentAddIdentifier(b, id)
fbsemantic.NativeVariableAssignmentAddInit_(b, negate)
fbsemantic.NativeVariableAssignmentAddInit_type(b, fbsemantic.ExpressionUnaryExpression)
nva := fbsemantic.NativeVariableAssignmentEnd(b)
want := &semantic.Package{
Package: "main",
Files: []*semantic.File{{
Loc: semantic.Loc{
Start: ast.Position{Line: 1, Column: 1},
End: ast.Position{Line: 1, Column: 9},
Source: `x = -3.5`,
},
Body: []semantic.Statement{
&semantic.NativeVariableAssignment{
Loc: semantic.Loc{
Start: ast.Position{Line: 1, Column: 1},
End: ast.Position{Line: 1, Column: 9},
Source: `x = -3.5`,
},
Identifier: &semantic.Identifier{
Loc: semantic.Loc{
Start: ast.Position{Line: 1, Column: 1},
End: ast.Position{Line: 1, Column: 2},
Source: `x`,
},
Name: "x",
},
Init: &semantic.UnaryExpression{
Loc: semantic.Loc{
Start: ast.Position{Line: 1, Column: 5},
End: ast.Position{Line: 1, Column: 9},
Source: `-3.5`,
},
Operator: ast.SubtractionOperator,
Argument: &semantic.FloatLiteral{
Loc: semantic.Loc{
Start: ast.Position{Line: 1, Column: 6},
End: ast.Position{Line: 1, Column: 9},
Source: `3.5`,
},
Value: 3.5,
},
},
},
},
}},
}
return want, doStatementBoilerplate(b, fbsemantic.StatementNativeVariableAssignment, nva, asnLoc)
}
func getFnExprFlatBuffer() (*semantic.Package, []byte) {
src := `f = (a, b=<-, c=72) => { return c }`
b := flatbuffers.NewBuilder(256)
p0loc := getFBLoc(b, "1:6", "1:7", src)
p0n := b.CreateString("a")
fbsemantic.IdentifierStart(b)
fbsemantic.IdentifierAddName(b, p0n)
fbsemantic.IdentifierAddLoc(b, p0loc)
p0k := fbsemantic.IdentifierEnd(b)
fbsemantic.FunctionParameterStart(b)
fbsemantic.FunctionParameterAddKey(b, p0k)
fbsemantic.FunctionParameterAddLoc(b, p0loc)
param0 := fbsemantic.FunctionParameterEnd(b)
p1loc := getFBLoc(b, "1:9", "1:10", src)
p1n := b.CreateString("b")
fbsemantic.IdentifierStart(b)
fbsemantic.IdentifierAddName(b, p1n)
fbsemantic.IdentifierAddLoc(b, p1loc)
p1k := fbsemantic.IdentifierEnd(b)
p1loc = getFBLoc(b, "1:9", "1:13", src)
fbsemantic.FunctionParameterStart(b)
fbsemantic.FunctionParameterAddLoc(b, p1loc)
fbsemantic.FunctionParameterAddKey(b, p1k)
fbsemantic.FunctionParameterAddIsPipe(b, true)
param1 := fbsemantic.FunctionParameterEnd(b)
p2loc := getFBLoc(b, "1:15", "1:16", src)
p2n := b.CreateString("c")
fbsemantic.IdentifierStart(b)
fbsemantic.IdentifierAddLoc(b, p2loc)
fbsemantic.IdentifierAddName(b, p2n)
p2k := fbsemantic.IdentifierEnd(b)
// default value
dloc := getFBLoc(b, "1:17", "1:19", src)
intTy := getFBBasicType(b, fbsemantic.TypeInt)
fbsemantic.IntegerLiteralStart(b)
fbsemantic.IntegerLiteralAddLoc(b, dloc)
fbsemantic.IntegerLiteralAddValue(b, 72)
def := fbsemantic.IntegerLiteralEnd(b)
p2loc = getFBLoc(b, "1:15", "1:19", src)
fbsemantic.FunctionParameterStart(b)
fbsemantic.FunctionParameterAddLoc(b, p2loc)
fbsemantic.FunctionParameterAddKey(b, p2k)
fbsemantic.FunctionParameterAddDefault(b, def)
fbsemantic.FunctionParameterAddDefaultType(b, fbsemantic.ExpressionIntegerLiteral)
param2 := fbsemantic.FunctionParameterEnd(b)
fbsemantic.FunctionExpressionStartParamsVector(b, 3)
b.PrependUOffsetT(param2)
b.PrependUOffsetT(param1)
b.PrependUOffsetT(param0)
params := b.EndVector(3)
idLoc := getFBLoc(b, "1:33", "1:34", src)
name := b.CreateString("c")
fbsemantic.IdentifierExpressionStart(b)
fbsemantic.IdentifierExpressionAddLoc(b, idLoc)
fbsemantic.IdentifierExpressionAddTypType(b, fbsemantic.MonoTypeBasic)
fbsemantic.IdentifierExpressionAddTyp(b, intTy)
fbsemantic.IdentifierExpressionAddName(b, name)
idExpr := fbsemantic.IdentifierExpressionEnd(b)
retLoc := getFBLoc(b, "1:26", "1:34", src)
fbsemantic.ReturnStatementStart(b)
fbsemantic.ReturnStatementAddLoc(b, retLoc)
fbsemantic.ReturnStatementAddArgument(b, idExpr)
fbsemantic.ReturnStatementAddArgumentType(b, fbsemantic.ExpressionIdentifierExpression)
retStmt := fbsemantic.ReturnStatementEnd(b)
fbsemantic.WrappedStatementStart(b)
fbsemantic.WrappedStatementAddStatement(b, retStmt)
fbsemantic.WrappedStatementAddStatementType(b, fbsemantic.StatementReturnStatement)
wrappedStmt := fbsemantic.WrappedExpressionEnd(b)
fbsemantic.BlockStartBodyVector(b, 1)
b.PrependUOffsetT(wrappedStmt)
stmts := b.EndVector(1)
bloc := getFBLoc(b, "1:24", "1:36", src)
fbsemantic.BlockStart(b)
fbsemantic.BlockAddLoc(b, bloc)
fbsemantic.BlockAddBody(b, stmts)
body := fbsemantic.BlockEnd(b)
funTy := getFnMonoType(b)
exprLoc := getFBLoc(b, "1:5", "1:36", src)
fbsemantic.FunctionExpressionStart(b)
fbsemantic.FunctionExpressionAddBody(b, body)
fbsemantic.FunctionExpressionAddParams(b, params)
fbsemantic.FunctionExpressionAddLoc(b, exprLoc)
fbsemantic.FunctionExpressionAddTyp(b, funTy)
fbsemantic.FunctionExpressionAddTypType(b, fbsemantic.MonoTypeFun)
fe := fbsemantic.FunctionExpressionEnd(b)
str := b.CreateString("f")
idLoc = getFBLoc(b, "1:1", "1:2", src)
fbsemantic.IdentifierStart(b)
fbsemantic.IdentifierAddLoc(b, idLoc)
fbsemantic.IdentifierAddName(b, str)
id := fbsemantic.IdentifierEnd(b)
pt := getFnPolyType(b)
asnLoc := getFBLoc(b, "1:1", "1:36", src)
fbsemantic.NativeVariableAssignmentStart(b)
fbsemantic.NativeVariableAssignmentAddLoc(b, asnLoc)
fbsemantic.NativeVariableAssignmentAddTyp(b, pt)
fbsemantic.NativeVariableAssignmentAddIdentifier(b, id)
fbsemantic.NativeVariableAssignmentAddInit_(b, fe)
fbsemantic.NativeVariableAssignmentAddInit_type(b, fbsemantic.ExpressionFunctionExpression)
nva := fbsemantic.NativeVariableAssignmentEnd(b)
want := &semantic.Package{
Package: "main",
Files: []*semantic.File{{
Loc: semantic.Loc{
Start: ast.Position{Line: 1, Column: 1},
End: ast.Position{Line: 1, Column: 36},
Source: `f = (a, b=<-, c=72) => { return c }`,
},
Body: []semantic.Statement{
&semantic.NativeVariableAssignment{
Loc: semantic.Loc{
Start: ast.Position{Line: 1, Column: 1},
End: ast.Position{Line: 1, Column: 36},
Source: `f = (a, b=<-, c=72) => { return c }`,
},
Identifier: &semantic.Identifier{
Loc: semantic.Loc{
Start: ast.Position{Line: 1, Column: 1},
End: ast.Position{Line: 1, Column: 2},
Source: `f`,
},
Name: "f",
},
Init: &semantic.FunctionExpression{
Loc: semantic.Loc{
Start: ast.Position{Line: 1, Column: 5},
End: ast.Position{Line: 1, Column: 36},
Source: `(a, b=<-, c=72) => { return c }`,
},
Parameters: &semantic.FunctionParameters{
Loc: semantic.Loc{
Start: ast.Position{Line: 1, Column: 5},
End: ast.Position{Line: 1, Column: 36},
Source: `(a, b=<-, c=72) => { return c }`,
},
List: []*semantic.FunctionParameter{
{
Loc: semantic.Loc{
Start: ast.Position{Line: 1, Column: 6},
End: ast.Position{Line: 1, Column: 7},
Source: `a`,
},
Key: &semantic.Identifier{
Loc: semantic.Loc{
Start: ast.Position{Line: 1, Column: 6},
End: ast.Position{Line: 1, Column: 7},
Source: `a`,
},
Name: "a",
},
},
{
Loc: semantic.Loc{
Start: ast.Position{Line: 1, Column: 9},
End: ast.Position{Line: 1, Column: 13},
Source: `b=<-`,
},
Key: &semantic.Identifier{
Loc: semantic.Loc{
Start: ast.Position{Line: 1, Column: 9},
End: ast.Position{Line: 1, Column: 10},
Source: `b`,
},
Name: "b",
},
},
{
Loc: semantic.Loc{
Start: ast.Position{Line: 1, Column: 15},
End: ast.Position{Line: 1, Column: 19},
Source: `c=72`,
},
Key: &semantic.Identifier{
Loc: semantic.Loc{
Start: ast.Position{Line: 1, Column: 15},
End: ast.Position{Line: 1, Column: 16},
Source: `c`,
},
Name: "c",
},
},
},
Pipe: &semantic.Identifier{
Loc: semantic.Loc{
Start: ast.Position{Line: 1, Column: 9},
End: ast.Position{Line: 1, Column: 10},
Source: `b`,
},
Name: "b",
},
},
Defaults: &semantic.ObjectExpression{
Loc: semantic.Loc{
Start: ast.Position{Line: 1, Column: 5},
End: ast.Position{Line: 1, Column: 36},
Source: `(a, b=<-, c=72) => { return c }`,
},
Properties: []*semantic.Property{
{
Loc: semantic.Loc{
Start: ast.Position{Line: 1, Column: 15},
End: ast.Position{Line: 1, Column: 19},
Source: `c=72`,
},
Key: &semantic.Identifier{
Loc: semantic.Loc{
Start: ast.Position{Line: 1, Column: 15},
End: ast.Position{Line: 1, Column: 16},
Source: `c`,
},
Name: "c",
},
Value: &semantic.IntegerLiteral{
Loc: semantic.Loc{
Start: ast.Position{Line: 1, Column: 17},
End: ast.Position{Line: 1, Column: 19},
Source: `72`,
},
Value: 72,
},
},
},
},
Block: &semantic.Block{
Loc: semantic.Loc{
Start: ast.Position{Line: 1, Column: 24},
End: ast.Position{Line: 1, Column: 36},
Source: `{ return c }`,
},
Body: []semantic.Statement{
&semantic.ReturnStatement{
Loc: semantic.Loc{
Start: ast.Position{Line: 1, Column: 26},
End: ast.Position{Line: 1, Column: 34},
Source: `return c`,
},
Argument: &semantic.IdentifierExpression{
Loc: semantic.Loc{
Start: ast.Position{Line: 1, Column: 33},
End: ast.Position{Line: 1, Column: 34},
Source: `c`,
},
Name: "c",
},
},
},
},
},
},
},
}},
}
return want, doStatementBoilerplate(b, fbsemantic.StatementNativeVariableAssignment, nva, asnLoc)
}
func getFBBasicType(b *flatbuffers.Builder, t fbsemantic.Type) flatbuffers.UOffsetT {
fbsemantic.BasicStart(b)
fbsemantic.BasicAddT(b, t)
return fbsemantic.BasicEnd(b)
}
func getFBPolyType(b *flatbuffers.Builder, mt flatbuffers.UOffsetT) flatbuffers.UOffsetT {
fbsemantic.PolyTypeStartVarsVector(b, 0)
varsVec := b.EndVector(0)
fbsemantic.PolyTypeStartConsVector(b, 0)
consVec := b.EndVector(0)
fbsemantic.PolyTypeStart(b)
fbsemantic.PolyTypeAddVars(b, varsVec)
fbsemantic.PolyTypeAddCons(b, consVec)
fbsemantic.PolyTypeAddExprType(b, fbsemantic.MonoTypeBasic)
fbsemantic.PolyTypeAddExpr(b, mt)
return fbsemantic.PolyTypeEnd(b)
}
func getFnPolyType(b *flatbuffers.Builder) flatbuffers.UOffsetT {
// The type of `(a, b=<-, c=72) => { return c }`
// is `forall [t0, t1] (a: t0, <-b: t1, ?c: int) -> int`
fbsemantic.VarStart(b)
fbsemantic.VarAddI(b, 0)
t0 := fbsemantic.VarEnd(b)
fbsemantic.VarStart(b)
fbsemantic.VarAddI(b, 1)
t1 := fbsemantic.VarEnd(b)
fbsemantic.PolyTypeStartVarsVector(b, 2)
b.PrependUOffsetT(t1)
b.PrependUOffsetT(t0)
varsVec := b.EndVector(2)
fbsemantic.PolyTypeStartConsVector(b, 0)
consVec := b.EndVector(0)
fun := getFnMonoType(b)
fbsemantic.PolyTypeStart(b)
fbsemantic.PolyTypeAddVars(b, varsVec)
fbsemantic.PolyTypeAddCons(b, consVec)
fbsemantic.PolyTypeAddExprType(b, fbsemantic.MonoTypeFun)
fbsemantic.PolyTypeAddExpr(b, fun)
return fbsemantic.PolyTypeEnd(b)
}
func getFnMonoType(b *flatbuffers.Builder) flatbuffers.UOffsetT {
intTy := getFBBasicType(b, fbsemantic.TypeInt)
fbsemantic.VarStart(b)
fbsemantic.VarAddI(b, 0)
t0 := fbsemantic.VarEnd(b)
fbsemantic.VarStart(b)
fbsemantic.VarAddI(b, 1)
t1 := fbsemantic.VarEnd(b)
an := b.CreateString("a")
fbsemantic.ArgumentStart(b)
fbsemantic.ArgumentAddName(b, an)
fbsemantic.ArgumentAddTType(b, fbsemantic.MonoTypeVar)
fbsemantic.ArgumentAddT(b, t0)
aa := fbsemantic.ArgumentEnd(b)
bn := b.CreateString("b")
fbsemantic.ArgumentStart(b)
fbsemantic.ArgumentAddName(b, bn)
fbsemantic.ArgumentAddTType(b, fbsemantic.MonoTypeVar)
fbsemantic.ArgumentAddT(b, t1)
fbsemantic.ArgumentAddPipe(b, true)
ba := fbsemantic.ArgumentEnd(b)
cn := b.CreateString("c")
fbsemantic.ArgumentStart(b)
fbsemantic.ArgumentAddName(b, cn)
fbsemantic.ArgumentAddTType(b, fbsemantic.MonoTypeBasic)
fbsemantic.ArgumentAddT(b, intTy)
fbsemantic.ArgumentAddOptional(b, true)
ca := fbsemantic.ArgumentEnd(b)
fbsemantic.FunStartArgsVector(b, 3)
b.PrependUOffsetT(ca)
b.PrependUOffsetT(ba)
b.PrependUOffsetT(aa)
args := b.EndVector(3)
fbsemantic.FunStart(b)
fbsemantic.FunAddArgs(b, args)
fbsemantic.FunAddRetnType(b, fbsemantic.MonoTypeBasic)
fbsemantic.FunAddRetn(b, intTy)
return fbsemantic.FunEnd(b)
}
func doStatementBoilerplate(builder *flatbuffers.Builder, stmtType fbsemantic.Statement, stmtOffset, locOffset flatbuffers.UOffsetT) []byte {
fbsemantic.WrappedStatementStart(builder)
fbsemantic.WrappedStatementAddStatementType(builder, stmtType)
fbsemantic.WrappedStatementAddStatement(builder, stmtOffset)
wrappedStatement := fbsemantic.WrappedExpressionEnd(builder)
fbsemantic.FileStartBodyVector(builder, 1)
builder.PrependUOffsetT(wrappedStatement)
body := builder.EndVector(1)
fbsemantic.FileStart(builder)
fbsemantic.FileAddLoc(builder, locOffset)
fbsemantic.FileAddBody(builder, body)
file := fbsemantic.FileEnd(builder)
fbsemantic.PackageStartFilesVector(builder, 1)
builder.PrependUOffsetT(file)
files := builder.EndVector(1)
pkgName := builder.CreateString("main")
fbsemantic.PackageStart(builder)
fbsemantic.PackageClauseAddName(builder, pkgName)
fbsemantic.PackageAddFiles(builder, files)
pkg := fbsemantic.PackageEnd(builder)
builder.Finish(pkg)
return builder.FinishedBytes()
}
func getFBLoc(builder *flatbuffers.Builder, start, end, src string) flatbuffers.UOffsetT {
l := getLoc(start, end, src)
fbSrc := builder.CreateString(l.Source)
fbsemantic.SourceLocationStart(builder)
startPos := fbsemantic.CreatePosition(builder, int32(l.Start.Line), int32(l.Start.Column))
fbsemantic.SourceLocationAddStart(builder, startPos)
endPos := fbsemantic.CreatePosition(builder, int32(l.End.Line), int32(l.End.Column))
fbsemantic.SourceLocationAddEnd(builder, endPos)
fbsemantic.SourceLocationAddSource(builder, fbSrc)
return fbsemantic.SourceLocationEnd(builder)
}
func getLoc(start, end, src string) *ast.SourceLocation {
toloc := func(s string) ast.Position {
parts := strings.SplitN(s, ":", 2)
line, _ := strconv.Atoi(parts[0])
column, _ := strconv.Atoi(parts[1])
return ast.Position{
Line: line,
Column: column,
}
}
l := &ast.SourceLocation{
Start: toloc(start),
End: toloc(end),
}
l.Source = source(src, l)
return l
}
func source(src string, loc *ast.SourceLocation) string {
if loc == nil ||
loc.Start.Line == 0 || loc.Start.Column == 0 ||
loc.End.Line == 0 || loc.End.Column == 0 {
return ""
}
soffset := 0
for i := loc.Start.Line - 1; i > 0; i-- {
o := strings.Index(src[soffset:], "\n")
if o == -1 {
return ""
}
soffset += o + 1
}
soffset += loc.Start.Column - 1
eoffset := 0
for i := loc.End.Line - 1; i > 0; i-- {
o := strings.Index(src[eoffset:], "\n")
if o == -1 {
return ""
}
eoffset += o + 1
}
eoffset += loc.End.Column - 1
if soffset >= len(src) || eoffset > len(src) || soffset > eoffset {
return "<invalid offsets>"
}
return src[soffset:eoffset]
}
// MyAssignment is a special struct used only
// for comparing NativeVariableAssignments with
// PolyTypes provided by a test case.
type MyAssignement struct {
semantic.Loc
Identifier *semantic.Identifier
Init semantic.Expression
Typ string
}
// transformGraph takes a semantic graph produced by Go, and modifies it
// so it looks like something produced by Rust.
// The differences do not affect program behavior at runtime.
func transformGraph(pkg *semantic.Package) error {
semantic.Walk(&transformingVisitor{}, pkg)
return nil
}
type transformingVisitor struct{}
func (tv *transformingVisitor) Visit(node semantic.Node) semantic.Visitor {
return tv
}
// toMonthsAndNanos takes a slice of durations,
// and represents them as months and nanoseconds,
// which is how durations are represented in a flatbuffer.
func toMonthsAndNanos(ds []ast.Duration) []ast.Duration {
var ns int64
var mos int64
for _, d := range ds {
switch d.Unit {
case ast.NanosecondUnit:
ns += d.Magnitude
case ast.MicrosecondUnit:
ns += 1000 * d.Magnitude
case ast.MillisecondUnit:
ns += 1000000 * d.Magnitude
case ast.SecondUnit:
ns += 1000000000 * d.Magnitude
case ast.MinuteUnit:
ns += 60 * 1000000000 * d.Magnitude
case ast.HourUnit:
ns += 60 * 60 * 1000000000 * d.Magnitude
case ast.DayUnit:
ns += 24 * 60 * 60 * 1000000000 * d.Magnitude
case ast.WeekUnit:
ns += 7 * 24 * 60 * 60 * 1000000000 * d.Magnitude
case ast.MonthUnit:
mos += d.Magnitude
case ast.YearUnit:
mos += 12 * d.Magnitude
default:
}
}
outDurs := make([]ast.Duration, 2)
outDurs[0] = ast.Duration{Magnitude: mos, Unit: ast.MonthUnit}
outDurs[1] = ast.Duration{Magnitude: ns, Unit: ast.NanosecondUnit}
return outDurs
}
func (tv *transformingVisitor) Done(node semantic.Node) {
switch n := node.(type) {
case *semantic.CallExpression:
// Rust call expr args are just an array, so there's no location info.
n.Arguments.Source = ""
case *semantic.DurationLiteral:
// Rust duration literals use the months + nanos representation,
// Go uses AST units.
n.Values = toMonthsAndNanos(n.Values)
case *semantic.File:
if len(n.Body) == 0 {
n.Body = nil
}
case *semantic.FunctionExpression:
// Blocks in Rust models blocks as linked lists, so we don't have a location for the
// entire block including the curly braces. It uses location of the statements instead.
nStmts := len(n.Block.Body)
n.Block.Start = n.Block.Body[0].Location().Start
n.Block.End = n.Block.Body[nStmts-1].Location().End
n.Block.Source = ""
}
}
var tvarRegexp *regexp.Regexp = regexp.MustCompile("t[0-9]+")
// canonicalizeError reindexes type variable numbers in error messages
// starting from zero, so that tests don't fail when the stdlib is updated.
func canonicalizeError(errMsg string) string {
count := 0
tvm := make(map[int]int)
return tvarRegexp.ReplaceAllStringFunc(errMsg, func(in string) string {
n, err := strconv.Atoi(in[1:])
if err != nil {
panic(err)
}
var nn int
var ok bool
if nn, ok = tvm[n]; !ok {
nn = count
count++
tvm[n] = nn
}
t := fmt.Sprintf("t%v", nn)
return t
})
}
type exprTypeChecker struct {
errs []error
}
func (e *exprTypeChecker) Visit(node semantic.Node) semantic.Visitor {
return e
}
func (e *exprTypeChecker) Done(node semantic.Node) {
nva, ok := node.(*semantic.NativeVariableAssignment)
if !ok {
return
}
pty := nva.Typ.String()
initTy := nva.Init.TypeOf().String()
if !strings.Contains(pty, initTy) {
err := fmt.Errorf("expected RHS of assignment for %q to have a type contained by %q, but it had %q", nva.Identifier.Name, pty, initTy)
e.errs = append(e.errs, err)
}
}
func checkExprTypes(pkg *semantic.Package) []error {
v := new(exprTypeChecker)
semantic.Walk(v, pkg)
return v.errs
}
func TestFlatBuffersRoundTrip(t *testing.T) {
tcs := []struct {
name string
fluxSrc string
err error
// For each variable assignment, the expected inferred type of the variable
types map[string]string
}{
{
name: "package",
fluxSrc: `package foo`,
},
{
name: "import",
fluxSrc: `
import "math"
import c "csv"`,
},
{
name: "option with assignment",
fluxSrc: `option o = "hello"`,
types: map[string]string{
"o": "forall [] string",
},
},
{
name: "option with member assignment error",
fluxSrc: `option o.m = "hello"`,
err: errors.New("error @1:8-1:9: undefined identifier o"),
},
{
name: "option with member assignment",
fluxSrc: `
import "influxdata/influxdb/monitor"
option monitor.log = (tables=<-) => tables`,
},
{
name: "builtin statement",
fluxSrc: `builtin foo : int`,
},
{
name: "test statement",
fluxSrc: `
import "testing"
test t = () => ({input: testing.loadStorage(csv: ""), want: testing.loadMem(csv: ""), fn: (table=<-) => table})`,
types: map[string]string{
"t": "forall [t0, t1, t2, t3, t4] where t4: Record () -> {fn: (<-table: t0) -> t0 | input: [{_field: t1 | _field: t1 | _measurement: t2 | _measurement: t2 | _start: time | _stop: time | _time: time | _time: time | t3}] | want: [t4]}",
},
},
{
name: "expression statement",
fluxSrc: `42`,
},
{
name: "native variable assignment",
fluxSrc: `x = 42`,
types: map[string]string{
"x": "forall [] int",
},
},
{
name: "string expression",
fluxSrc: `
str = "hello"
x = "${str} world"`,
types: map[string]string{
"str": "forall [] string",
"x": "forall [] string",
},
},
{
name: "array expression/index expression",
fluxSrc: `
x = [1, 2, 3]
y = x[2]`,
types: map[string]string{
"x": "forall [] [int]",
"y": "forall [] int",
},
},
{
name: "simple fn",
fluxSrc: `f = (x) => x`,
types: map[string]string{
"f": "forall [t0] (x: t0) -> t0",
},
},
{
name: "simple fn with block (return statement)",
fluxSrc: `f = (x) => {return x}`,
types: map[string]string{
"f": "forall [t0] (x: t0) -> t0",
},
},
{
name: "simple fn with 2 stmts",
fluxSrc: `
f = (x) => {
z = x + 1
127 // expr statement
return z
}`,
types: map[string]string{
"f": "forall [] (x: int) -> int",
"z": "forall [] int",
},
},
{
name: "simple fn with 2 params",
fluxSrc: `f = (x, y) => x + y`,
types: map[string]string{
"f": "forall [t0] where t0: Addable (x: t0, y: t0) -> t0",
},
},
{
name: "apply",
fluxSrc: `apply = (f, p) => f(param: p)`,
types: map[string]string{
"apply": "forall [t0, t1] (f: (param: t0) -> t1, p: t0) -> t1",
},
},
{
name: "apply2",
fluxSrc: `apply2 = (f, p0, p1) => f(param0: p0, param1: p1)`,
types: map[string]string{
"apply2": "forall [t0, t1, t2] (f: (param0: t0, param1: t1) -> t2, p0: t0, p1: t1) -> t2",
},
},
{
name: "default args",
fluxSrc: `f = (x=1, y) => x + y`,
types: map[string]string{
"f": "forall [] (?x: int, y: int) -> int",
},
},
{
name: "two default args",
fluxSrc: `f = (x=1, y=10, z) => x + y + z`,
types: map[string]string{
"f": "forall [] (?x: int, ?y: int, z: int) -> int",
},
},
{
name: "pipe args",
fluxSrc: `f = (x=<-, y) => x + y`,
types: map[string]string{
"f": "forall [t0] where t0: Addable (<-x: t0, y: t0) -> t0",
},
},
{
name: "binary expression",
fluxSrc: `
x = 1 * 2 / 3 - 1 + 7 % 8^9
lt = 1 < 3
lte = 1 <= 3
gt = 1 > 3
gte = 1 >= 3
eq = 1 == 3
neq = 1 != 3
rem = "foo" =~ /foo/
renm = "food" !~ /foog/`,
types: map[string]string{
"x": "forall [] int",
"lt": "forall [] bool",
"lte": "forall [] bool",
"gt": "forall [] bool",
"gte": "forall [] bool",
"eq": "forall [] bool",
"neq": "forall [] bool",
"rem": "forall [] bool",
"renm": "forall [] bool",
},
},
{
name: "call expression",
fluxSrc: `
f = (x) => x + 1
y = f(x: 10)`,
types: map[string]string{
"f": "forall [] (x: int) -> int",
"y": "forall [] int",
},
},
{
name: "call expression two args",
fluxSrc: `
f = (x, y) => x + y
y = f(x: 10, y: 30)`,
types: map[string]string{
"f": "forall [t0] where t0: Addable (x: t0, y: t0) -> t0",
"y": "forall [] int",
},
},
{
name: "call expression two args with pipe",
fluxSrc: `
f = (x, y=<-) => x + y
y = 30 |> f(x: 10)`,
types: map[string]string{
"f": "forall [t0] where t0: Addable (x: t0, <-y: t0) -> t0",
"y": "forall [] int",
},
},
{
name: "conditional expression",
fluxSrc: `
ans = if 100 > 0 then "yes" else "no"`,
types: map[string]string{
"ans": "forall [] string",
},
},
{
name: "identifier expression",
fluxSrc: `
x = 34
y = x`,
types: map[string]string{
"x": "forall [] int",
"y": "forall [] int",
},
},
{
name: "logical expression",
fluxSrc: `x = true and false or true`,
types: map[string]string{
"x": "forall [] bool",
},
},
{
name: "member expression/object expression",
fluxSrc: `
o = {temp: 30.0, loc: "FL"}
t = o.temp`,
types: map[string]string{
"o": "forall [] {loc: string | temp: float}",
"t": "forall [] float",
},
},
{
name: "object expression with",
fluxSrc: `
o = {temp: 30.0, loc: "FL"}
o2 = {o with city: "Tampa"}`,
types: map[string]string{
"o": "forall [] {loc: string | temp: float}",
"o2": "forall [] {city: string | loc: string | temp: float}",
},
},
{
name: "object expression extends",
fluxSrc: `
f = (r) => ({r with val: 32})
o = f(r: {val: "thirty-two"})`,
types: map[string]string{
"f": "forall [t0] (r: t0) -> {val: int | t0}",
"o": "forall [] {val: int | val: string}",
},
},
{
name: "unary expression",
fluxSrc: `
x = -1
y = +1
b = not false`,
types: map[string]string{
"x": "forall [] int",
"y": "forall [] int",
"b": "forall [] bool",
},
},
{
name: "exists operator",
fluxSrc: `e = exists {foo: 30}.bar`,
err: errors.New("type error @1:12-1:21: record is missing label bar"),
},
{
name: "exists operator with tvar",
fluxSrc: `f = (r) => exists r.foo`,
types: map[string]string{
"f": "forall [t0, t1] (r: {foo: t0 | t1}) -> bool",
},
},
{
// This seems to be a bug: https://github.com/influxdata/flux/issues/2355
name: "exists operator with tvar and call",
fluxSrc: `
f = (r) => exists r.foo
ff = (r) => f(r: {r with bar: 1})`,
types: map[string]string{
"f": "forall [t0, t1] (r: {foo: t0 | t1}) -> bool",
// Note: t1 is unused in the monotype, and t2 is not quantified.
// Type of ff should be the same as f.
"ff": "forall [t0, t2] (r: {foo: t0 | t1}) -> bool",
},
},
{
name: "datetime literal",
fluxSrc: `t = 2018-08-15T13:36:23-07:00`,
types: map[string]string{
"t": "forall [] time",
},
},
{
name: "duration literal",
fluxSrc: `d = 1y1mo1w1d1h1m1s1ms1us1ns`,
types: map[string]string{
"d": "forall [] duration",
},
},
{
name: "negative duration literal",
fluxSrc: `d = -1y1d`,
types: map[string]string{
"d": "forall [] duration",
},
},
{
name: "zero duration literal",
fluxSrc: `d = 0d`,
types: map[string]string{
"d": "forall [] duration",
},
},
{
name: "regexp literal",
fluxSrc: `re = /foo/`,
types: map[string]string{
"re": "forall [] regexp",
},
},
{
name: "float literal",
fluxSrc: `f = 3.0`,
types: map[string]string{
"f": "forall [] float",
},
},
{
name: "typical query",
fluxSrc: `
v = {
bucket: "telegraf",
windowPeriod: 15s,
timeRangeStart: -5m
}
q = from(bucket: v.bucket)
|> filter(fn: (r) => r._measurement == "disk")
|> filter(fn: (r) => r._field == "used_percent")`,
types: map[string]string{
"v": "forall [] {bucket: string | timeRangeStart: duration | windowPeriod: duration}",
"q": "forall [t0, t1] [{_field: string | _measurement: string | _time: time | _value: t0 | t1}]",
},
},
}
for _, tc := range tcs {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
astPkg := parser.ParseSource(tc.fluxSrc)
want, err := semantic.New(astPkg)
if err != nil {
t.Fatal(err)
}
if err := transformGraph(want); err != nil {
t.Fatal(err)
}
got, err := runtime.AnalyzeSource(tc.fluxSrc)
if err != nil {
if tc.err == nil {
t.Fatal(err)
}
if want, got := tc.err.Error(), canonicalizeError(err.Error()); want != got {
t.Fatalf("expected error %q, but got %q", want, got)
}
return
}
if tc.err != nil {
t.Fatalf("expected error %q, but got nothing", tc.err)
}
errs := checkExprTypes(got)
if len(errs) > 0 {
for _, e := range errs {
t.Error(e)
}
t.Fatal("found errors in expression types")
}
// Create a special comparison option to compare the types
// of NativeVariableAssignments using the expected types in the map
// provided by the test case.
assignCmp := cmp.Transformer("assign", func(nva *semantic.NativeVariableAssignment) *MyAssignement {
var typStr string
if nva.Typ.IsNil() == true {
// This is the assignment from Go.
var ok bool
typStr, ok = tc.types[nva.Identifier.Name]
if !ok {
typStr = "*** missing type ***"
}
} else {
// This is the assignment from Rust.
typStr = nva.Typ.CanonicalString()
}
return &MyAssignement{
Loc: nva.Loc,
Identifier: nva.Identifier,
Init: nva.Init,
Typ: typStr,
}
})
opts := make(cmp.Options, len(cmpOpts), len(cmpOpts)+2)
copy(opts, cmpOpts)
opts = append(opts, assignCmp, cmp.AllowUnexported(MyAssignement{}))
if diff := cmp.Diff(want, got, opts...); diff != "" {
t.Fatalf("differences in semantic graph: -want/+got:\n%v", diff)
}
})
}
}
| 1 | 16,018 | Surprised to see this old syntax here, but not _that_ surprised. | influxdata-flux | go |
@@ -164,9 +164,16 @@ abstract class AbstractQueue<T, Q extends AbstractQueue<T, Q>> implements Traver
return Collections.removeAll((Q) this, elements);
}
- @SuppressWarnings("unchecked")
+ @Deprecated
public Q removeAll(Predicate<? super T> predicate) {
- return Collections.removeAll((Q) this, predicate);
+ Objects.requireNonNull(predicate, "predicate is null");
+ return reject(predicate);
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public Q reject(Predicate<? super T> predicate) {
+ return Collections.reject((Q) this, predicate);
}
@Override | 1 | /* __ __ __ __ __ ___
* \ \ / / \ \ / / __/
* \ \/ / /\ \ \/ / /
* \____/__/ \__\____/__/
*
* Copyright 2014-2017 Vavr, http://vavr.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.vavr.collection;
import io.vavr.Tuple;
import io.vavr.Tuple2;
import io.vavr.control.Option;
import java.util.NoSuchElementException;
import java.util.Objects;
import java.util.function.Consumer;
import java.util.function.Predicate;
/**
* @author Pap Lőrinc, Daniel Dietrich
*/
abstract class AbstractQueue<T, Q extends AbstractQueue<T, Q>> implements Traversable<T> {
/**
* Removes an element from this Queue.
*
* @return a tuple containing the first element and the remaining elements of this Queue
* @throws NoSuchElementException if this Queue is empty
*/
public Tuple2<T, Q> dequeue() {
if (isEmpty()) {
throw new NoSuchElementException("dequeue of empty " + getClass().getSimpleName());
} else {
return Tuple.of(head(), tail());
}
}
/**
* Removes an element from this Queue.
*
* @return {@code None} if this Queue is empty, otherwise {@code Some} {@code Tuple} containing the first element and the remaining elements of this Queue
*/
public Option<Tuple2<T, Q>> dequeueOption() {
return isEmpty() ? Option.none() : Option.some(dequeue());
}
/**
* Enqueues a new element.
*
* @param element The new element
* @return a new {@code Queue} instance, containing the new element
*/
public abstract Q enqueue(T element);
/**
* Enqueues the given elements. A queue has FIFO order, i.e. the first of the given elements is
* the first which will be retrieved.
*
* @param elements Elements, may be empty
* @return a new {@code Queue} instance, containing the new elements
* @throws NullPointerException if elements is null
*/
@SuppressWarnings("unchecked")
public Q enqueue(T... elements) {
Objects.requireNonNull(elements, "elements is null");
return enqueueAll(List.of(elements));
}
/**
* Enqueues the given elements. A queue has FIFO order, i.e. the first of the given elements is
* the first which will be retrieved.
*
* @param elements An Iterable of elements, may be empty
* @return a new {@code Queue} instance, containing the new elements
* @throws NullPointerException if elements is null
*/
public abstract Q enqueueAll(Iterable<? extends T> elements);
/**
* Returns the first element without modifying it.
*
* @return the first element
* @throws NoSuchElementException if this Queue is empty
*/
public T peek() {
if (isEmpty()) {
throw new NoSuchElementException("peek of empty " + getClass().getSimpleName());
} else {
return head();
}
}
/**
* Returns the first element without modifying the Queue.
*
* @return {@code None} if this Queue is empty, otherwise a {@code Some} containing the first element
*/
public Option<T> peekOption() {
return isEmpty() ? Option.none() : Option.some(peek());
}
@Override
public Q dropUntil(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return dropWhile(predicate.negate());
}
@Override
public abstract Q dropWhile(Predicate<? super T> predicate);
/**
* Dual of {@linkplain #tail()}, returning all elements except the last.
*
* @return a new instance containing all elements except the last.
* @throws UnsupportedOperationException if this is empty
*/
@Override
public abstract Q init();
/**
* Dual of {@linkplain #tailOption()}, returning all elements except the last as {@code Option}.
*
* @return {@code Some(Q)} or {@code None} if this is empty.
*/
@Override
public Option<Q> initOption() {
return isEmpty() ? Option.none() : Option.some(init());
}
/**
* Drops the first element of a non-empty Traversable.
*
* @return A new instance of Traversable containing all elements except the first.
* @throws UnsupportedOperationException if this is empty
*/
@Override
public abstract Q tail();
@Override
public Option<Q> tailOption() {
return isEmpty() ? Option.none() : Option.some(tail());
}
@Override
@SuppressWarnings("unchecked")
public Q retainAll(Iterable<? extends T> elements) {
return Collections.retainAll((Q) this, elements);
}
@SuppressWarnings("unchecked")
public Q removeAll(Iterable<? extends T> elements) {
return Collections.removeAll((Q) this, elements);
}
@SuppressWarnings("unchecked")
public Q removeAll(Predicate<? super T> predicate) {
return Collections.removeAll((Q) this, predicate);
}
@Override
public Q takeWhile(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return takeUntil(predicate.negate());
}
@Override
public abstract Q takeUntil(Predicate<? super T> predicate);
@SuppressWarnings("unchecked")
@Override
public Q peek(Consumer<? super T> action) {
Objects.requireNonNull(action, "action is null");
if (!isEmpty()) {
action.accept(head());
}
return (Q) this;
}
@Override
public String toString() {
return mkString(stringPrefix() + "(", ", ", ")");
}
}
| 1 | 12,715 | It is good to mark it! Maybe we can remove the method for Vavr 1.0.0 - but for now it can stay as-is. Thx! | vavr-io-vavr | java |
@@ -15,11 +15,12 @@
package pool
import (
+ "errors"
"sync"
- "time"
"github.com/golang/glog"
- "github.com/openebs/maya/pkg/util"
+ col "github.com/openebs/maya/cmd/maya-exporter/app/collector"
+ types "github.com/openebs/maya/pkg/exec"
zpool "github.com/openebs/maya/pkg/zpool/v1alpha1"
"github.com/prometheus/client_golang/prometheus"
) | 1 | // Copyright © 2017-2019 The OpenEBS Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pool
import (
"sync"
"time"
"github.com/golang/glog"
"github.com/openebs/maya/pkg/util"
zpool "github.com/openebs/maya/pkg/zpool/v1alpha1"
"github.com/prometheus/client_golang/prometheus"
)
// pool implements prometheus.Collector interface
type pool struct {
sync.Mutex
metrics
request bool
}
var (
// runner variable is used for executing binaries
runner util.Runner
)
// InitVar initialize runner variable
func InitVar() {
runner = util.RealRunner{}
}
// New returns new instance of pool
func New() *pool {
return &pool{
metrics: newMetrics(),
}
}
func (p *pool) isRequestInProgress() bool {
return p.request
}
func (p *pool) setRequestToFalse() {
p.Lock()
p.request = false
p.Unlock()
}
// GetInitStatus run zpool binary to verify whether zpool container
// has started.
func (p *pool) GetInitStatus(timeout time.Duration) {
for {
stdout, err := zpool.Run(timeout, runner, "status")
if err != nil {
glog.Warningf("Failed to get zpool status, error: %v, pool container may be initializing, retry after 2s", err)
time.Sleep(2 * time.Second)
continue
}
str := string(stdout)
if zpool.IsNotAvailable(str) {
glog.Warning("No pool available, pool must be creating, retry after 3s")
time.Sleep(3 * time.Second)
continue
}
glog.Info("\n", string(stdout))
break
}
}
// collectors returns the list of the collectors
func (p *pool) collectors() []prometheus.Collector {
return []prometheus.Collector{
p.size,
p.status,
p.usedCapacity,
p.freeCapacity,
p.usedCapacityPercent,
p.zpoolCommandErrorCounter,
p.zpoolRejectRequestCounter,
p.zpoolListparseErrorCounter,
p.noPoolAvailableErrorCounter,
p.incompleteOutputErrorCounter,
}
}
// gaugeVec returns list of Gauge vectors (prometheus's type)
// related to zpool in which values will be set.
// NOTE: Please donot edit the order, add new metrics at the end of
// the list
func (p *pool) gaugeVec() []prometheus.Gauge {
return []prometheus.Gauge{
p.size,
p.usedCapacity,
p.freeCapacity,
p.usedCapacityPercent,
}
}
// Describe is implementation of Describe method of prometheus.Collector
// interface.
func (p *pool) Describe(ch chan<- *prometheus.Desc) {
for _, col := range p.collectors() {
col.Describe(ch)
}
}
func (p *pool) getZpoolStats(ch chan<- prometheus.Metric) (zpool.Stats, error) {
var (
err error
stdoutZpool []byte
timeout = 30 * time.Second
zpoolStats = zpool.Stats{}
)
stdoutZpool, err = zpool.Run(timeout, runner, "list", "-Hp")
if err != nil {
p.zpoolCommandErrorCounter.Inc()
p.zpoolCommandErrorCounter.Collect(ch)
return zpoolStats, err
}
glog.V(2).Infof("Parse stdout of zpool list command, stdout: %v", string(stdoutZpool))
zpoolStats, err = zpool.ListParser(stdoutZpool)
if err != nil {
if err.Error() == string(zpool.NoPoolAvailable) {
p.noPoolAvailableErrorCounter.Inc()
p.noPoolAvailableErrorCounter.Collect(ch)
} else {
p.incompleteOutputErrorCounter.Inc()
p.incompleteOutputErrorCounter.Collect(ch)
}
return zpoolStats, err
}
return zpoolStats, nil
}
// Collect is implementation of prometheus's prometheus.Collector interface
func (p *pool) Collect(ch chan<- prometheus.Metric) {
p.Lock()
if p.isRequestInProgress() {
p.zpoolRejectRequestCounter.Inc()
p.Unlock()
p.zpoolRejectRequestCounter.Collect(ch)
return
}
p.request = true
p.Unlock()
poolStats := statsFloat64{}
zpoolStats, err := p.getZpoolStats(ch)
if err != nil {
p.setRequestToFalse()
return
}
glog.V(2).Infof("Got zpool stats: %#v", zpoolStats)
poolStats.parse(zpoolStats, p)
p.setZPoolStats(poolStats, zpoolStats.Name)
for _, col := range p.collectors() {
col.Collect(ch)
}
p.setRequestToFalse()
}
func (p *pool) setZPoolStats(stats statsFloat64, name string) {
items := stats.List()
for index, col := range p.gaugeVec() {
col.Set(items[index])
}
p.status.WithLabelValues(name).Set(stats.status)
}
| 1 | 12,234 | exported func New returns unexported type *pool.pool, which can be annoying to use | openebs-maya | go |
@@ -17,7 +17,7 @@ module BoltSpec
# Nothing on the executor is 'public'
class MockExecutor
- attr_reader :noop, :error_message, :in_parallel
+ attr_reader :noop, :error_message, :in_parallel, :transports
attr_accessor :run_as, :transport_features, :execute_any_plan
def initialize(modulepath) | 1 | # frozen_string_literal: true
require 'bolt_spec/plans/action_stubs'
require 'bolt_spec/plans/publish_stub'
require 'bolt/error'
require 'bolt/executor'
require 'bolt/result_set'
require 'bolt/result'
require 'pathname'
require 'set'
module BoltSpec
module Plans
MOCKED_ACTIONS = %i[command download plan script task upload].freeze
class UnexpectedInvocation < ArgumentError; end
# Nothing on the executor is 'public'
class MockExecutor
attr_reader :noop, :error_message, :in_parallel
attr_accessor :run_as, :transport_features, :execute_any_plan
def initialize(modulepath)
@noop = false
@run_as = nil
@in_parallel = false
@error_message = nil
@allow_apply = false
@modulepath = [modulepath].flatten.map { |path| File.absolute_path(path) }
MOCKED_ACTIONS.each { |action| instance_variable_set(:"@#{action}_doubles", {}) }
@stub_out_message = nil
@transport_features = ['puppet-agent']
@executor_real = Bolt::Executor.new
# by default, we want to execute any plan that we come across without error
# or mocking. users can toggle this behavior so that plans will either need to
# be mocked out, or an error will be thrown.
@execute_any_plan = true
# plans that are allowed to be executed by the @executor_real
@allowed_exec_plans = {}
end
def module_file_id(file)
modpath = @modulepath.select { |path| file =~ /^#{path}/ }
raise "Could not identify modulepath containing #{file}: #{modpath}" unless modpath.size == 1
path = Pathname.new(file)
relative = path.relative_path_from(Pathname.new(modpath.first))
segments = relative.to_path.split('/')
([segments[0]] + segments[2..-1]).join('/')
end
def run_command(targets, command, options = {}, _position = [])
result = nil
if (doub = @command_doubles[command] || @command_doubles[:default])
result = doub.process(targets, command, options)
end
unless result
targets = targets.map(&:name)
@error_message = "Unexpected call to 'run_command(#{command}, #{targets}, #{options})'"
raise UnexpectedInvocation, @error_message
end
result
end
def run_script(targets, script_path, arguments, options = {}, _position = [])
script = module_file_id(script_path)
result = nil
if (doub = @script_doubles[script] || @script_doubles[:default])
result = doub.process(targets, script, arguments, options)
end
unless result
targets = targets.map(&:name)
params = options.merge('arguments' => arguments)
@error_message = "Unexpected call to 'run_script(#{script}, #{targets}, #{params})'"
raise UnexpectedInvocation, @error_message
end
result
end
def run_task(targets, task, arguments, options = {}, _position = [])
result = nil
if (doub = @task_doubles[task.name] || @task_doubles[:default])
result = doub.process(targets, task.name, arguments, options)
end
unless result
targets = targets.map(&:name)
params = arguments.merge(options)
@error_message = "Unexpected call to 'run_task(#{task.name}, #{targets}, #{params})'"
raise UnexpectedInvocation, @error_message
end
result
end
def download_file(targets, source, destination, options = {}, _position = [])
result = nil
if (doub = @download_doubles[source] || @download_doubles[:default])
result = doub.process(targets, source, destination, options)
end
unless result
targets = targets.map(&:name)
@error_message = "Unexpected call to 'download_file(#{source}, #{destination}, #{targets}, #{options})'"
raise UnexpectedInvocation, @error_message
end
result
end
def upload_file(targets, source_path, destination, options = {}, _position = [])
source = module_file_id(source_path)
result = nil
if (doub = @upload_doubles[source] || @upload_doubles[:default])
result = doub.process(targets, source, destination, options)
end
unless result
targets = targets.map(&:name)
@error_message = "Unexpected call to 'upload_file(#{source}, #{destination}, #{targets}, #{options})'"
raise UnexpectedInvocation, @error_message
end
result
end
def with_plan_allowed_exec(plan_name, params)
@allowed_exec_plans[plan_name] = params
result = yield
@allowed_exec_plans.delete(plan_name)
result
end
def run_plan(scope, plan_clj, params)
result = nil
plan_name = plan_clj.closure_name
# get the mock object either by plan name, or the default in case allow_any_plan
# was called, if both are nil / don't exist, then dub will be nil and we'll fall
# through to another conditional statement
doub = @plan_doubles[plan_name] || @plan_doubles[:default]
# rubocop:disable Lint/DuplicateBranch
# High level:
# - If we've explicitly allowed execution of the plan (normally the main plan
# passed into BoltSpec::Plan::run_plan()), then execute it
# - If we've explicitly "allowed/expected" the plan (mocked),
# then run it through the mock object
# - If we're allowing "any" plan to be executed,
# then execute it
# - Otherwise we have an error
if @allowed_exec_plans.key?(plan_name) && @allowed_exec_plans[plan_name] == params
# This plan's name + parameters were explicitly allowed to be executed.
# run it with the real executor.
# We require this functionality so that the BoltSpec::Plans.run_plan()
# function can kick off the initial plan. In reality, no other plans should
# be in this hash.
result = @executor_real.run_plan(scope, plan_clj, params)
elsif doub
result = doub.process(scope, plan_clj, params)
# the throw here is how Puppet exits out of a closure and returns a result
# it throws this special symbol with a result object that is captured by
# the run_plan Puppet function
throw :return, result
elsif @execute_any_plan
# if the plan wasn't allowed or mocked out, and we're allowing any plan to be
# executed, then execute the plan
result = @executor_real.run_plan(scope, plan_clj, params)
else
# convert to JSON and back so that we get the ruby representation with all keys and
# values converted to a string .to_s instead of their ruby object notation
params_str = JSON.parse(params.to_json)
@error_message = "Unexpected call to 'run_plan(#{plan_name}, #{params_str})'"
raise UnexpectedInvocation, @error_message
end
# rubocop:enable Lint/DuplicateBranch
result
end
def assert_call_expectations
MOCKED_ACTIONS.each do |action|
instance_variable_get(:"@#{action}_doubles").map do |object, doub|
doub.assert_called(object)
end
end
@stub_out_message.assert_called('out::message') if @stub_out_message
end
MOCKED_ACTIONS.each do |action|
define_method(:"stub_#{action}") do |object|
instance_variable_get(:"@#{action}_doubles")[object] ||= ActionDouble.new(:"#{action.capitalize}Stub")
end
end
def stub_out_message
@stub_out_message ||= ActionDouble.new(:PublishStub)
end
def stub_apply
@allow_apply = true
end
def wait_until_available(targets, _options)
Bolt::ResultSet.new(targets.map { |target| Bolt::Result.new(target) })
end
def log_action(*_args)
yield
end
def log_plan(_plan_name)
yield
end
def without_default_logging
yield
end
def report_function_call(_function); end
def report_bundled_content(_mode, _name); end
def report_file_source(_plan_function, _source); end
def report_apply(_statements, _resources); end
def report_yaml_plan(_plan); end
def publish_event(event)
if event[:type] == :message
unless @stub_out_message
@error_message = "Unexpected call to 'out::message(#{event[:message]})'"
raise UnexpectedInvocation, @error_message
end
@stub_out_message.process(event[:message])
end
end
# Mocked for Apply so it does not compile and execute.
def with_node_logging(_description, targets)
raise "Unexpected call to apply(#{targets})" unless @allow_apply
end
def queue_execute(targets)
raise "Unexpected call to apply(#{targets})" unless @allow_apply
targets
end
def await_results(promises)
raise "Unexpected call to apply(#{targets})" unless @allow_apply
Bolt::ResultSet.new(promises.map { |target| Bolt::ApplyResult.new(target) })
end
# End Apply mocking
# Mocked for apply_prep
def transport(_protocol)
Class.new do
attr_reader :provided_features
def initialize(features)
@provided_features = features
end
end.new(transport_features)
end
# End apply_prep mocking
end
end
end
| 1 | 18,066 | Sorry, I feel like I'm missing something - where does this get called? | puppetlabs-bolt | rb |
@@ -423,12 +423,9 @@ func (e *edged) syncNodeStatus() {
klog.Errorf("Register node failed: %v", err)
return
}
- if err := e.updateNodeStatus(); err != nil {
- klog.Errorf("Unable to update node status: %v", err)
- }
- } else {
- if err := e.updateNodeStatus(); err != nil {
- klog.Errorf("Unable to update node status: %v", err)
- }
+ }
+
+ if err := e.updateNodeStatus(); err != nil {
+ klog.Errorf("Unable to update node status: %v", err)
}
} | 1 | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@CHANGELOG
KubeEdge Authors: To create mini-kubelet for edge deployment scenario,
This file is derived from K8S Kubelet code with reduced set of methods
Changes done are
1. setNodeReadyCondition is partially come from "k8s.io/kubernetes/pkg/kubelet.setNodeReadyCondition"
*/
package edged
import (
"fmt"
"io/ioutil"
"os"
"regexp"
"runtime"
"strconv"
"strings"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog"
beehiveContext "github.com/kubeedge/beehive/pkg/core/context"
"github.com/kubeedge/beehive/pkg/core/model"
"github.com/kubeedge/kubeedge/common/constants"
edgeapi "github.com/kubeedge/kubeedge/common/types"
"github.com/kubeedge/kubeedge/edge/pkg/common/message"
"github.com/kubeedge/kubeedge/edge/pkg/common/modules"
"github.com/kubeedge/kubeedge/edge/pkg/edged/apis"
"github.com/kubeedge/kubeedge/edge/pkg/edged/config"
"github.com/kubeedge/kubeedge/edge/pkg/edgehub"
"github.com/kubeedge/kubeedge/pkg/util"
)
//GPUInfoQueryTool sets information monitoring tool location for GPU
var GPUInfoQueryTool = "/var/IEF/nvidia/bin/nvidia-smi"
var initNode v1.Node
var reservationMemory = resource.MustParse(fmt.Sprintf("%dMi", 100))
func (e *edged) initialNode() (*v1.Node, error) {
var node = &v1.Node{}
if runtime.GOOS == "windows" {
return node, nil
}
nodeInfo, err := e.getNodeInfo()
if err != nil {
return nil, err
}
node.Status.NodeInfo = nodeInfo
hostname, err := os.Hostname()
if err != nil {
klog.Errorf("couldn't determine hostname: %v", err)
return nil, err
}
if len(e.nodeName) != 0 {
hostname = e.nodeName
}
node.Labels = map[string]string{
// Kubernetes built-in labels
v1.LabelHostname: hostname,
v1.LabelOSStable: runtime.GOOS,
v1.LabelArchStable: runtime.GOARCH,
// KubeEdge specific labels
"node-role.kubernetes.io/edge": "",
"node-role.kubernetes.io/agent": "",
}
ip, err := e.getIP()
if err != nil {
return nil, err
}
node.Status.Addresses = []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: ip},
{Type: v1.NodeHostName, Address: hostname},
}
node.Status.Capacity = make(v1.ResourceList)
node.Status.Allocatable = make(v1.ResourceList)
err = e.setMemInfo(node.Status.Capacity, node.Status.Allocatable)
if err != nil {
return nil, err
}
err = e.setCPUInfo(node.Status.Capacity, node.Status.Allocatable)
if err != nil {
return nil, err
}
node.Status.Capacity[v1.ResourcePods] = *resource.NewQuantity(110, resource.DecimalSI)
node.Status.Allocatable[v1.ResourcePods] = *resource.NewQuantity(110, resource.DecimalSI)
return node, nil
}
func (e *edged) setInitNode(node *v1.Node) {
initNode.Status = *node.Status.DeepCopy()
}
// Retrieve node status
func retrieveDevicePluginStatus(s string) (string, error) {
tagLen := len(apis.StatusTag)
if len(s) <= tagLen {
return "", fmt.Errorf("no node status wrapped in")
}
tag := s[:tagLen]
if string(tag) != apis.StatusTag {
return "", fmt.Errorf("not a node status json string")
}
statusList := s[tagLen:]
klog.Infof("retrieve piggybacked status: %v", statusList)
return statusList, nil
}
func (e *edged) getNodeStatusRequest(node *v1.Node) (*edgeapi.NodeStatusRequest, error) {
var nodeStatus = &edgeapi.NodeStatusRequest{}
nodeStatus.UID = e.uid
nodeStatus.Status = *node.Status.DeepCopy()
nodeStatus.Status.Phase = e.getNodePhase()
devicePluginCapacity, _, removedDevicePlugins := e.getDevicePluginResourceCapacity()
for k, v := range devicePluginCapacity {
klog.Infof("Update capacity for %s to %d", k, v.Value())
nodeStatus.Status.Capacity[k] = v
nodeStatus.Status.Allocatable[k] = v
}
nameSet := sets.NewString(string(v1.ResourceCPU), string(v1.ResourceMemory), string(v1.ResourceStorage),
string(v1.ResourceEphemeralStorage), string(apis.NvidiaGPUScalarResourceName))
for _, removedResource := range removedDevicePlugins {
// if the remmovedReousrce is not contained in the nameSet and contains specific tag
if !nameSet.Has(removedResource) {
status, err := retrieveDevicePluginStatus(removedResource)
if err == nil {
if node.Annotations == nil {
node.Annotations = make(map[string]string)
}
node.Annotations[apis.NvidiaGPUStatusAnnotationKey] = status
klog.Infof("Setting node annotation to add node status list to Scheduler")
continue
}
}
klog.Infof("Remove capacity for %s", removedResource)
delete(node.Status.Capacity, v1.ResourceName(removedResource))
}
e.setNodeStatusDaemonEndpoints(nodeStatus)
e.setNodeStatusConditions(nodeStatus)
if e.gpuPluginEnabled {
err := e.setGPUInfo(nodeStatus)
if err != nil {
klog.Errorf("setGPUInfo failed, err: %v", err)
}
}
if e.volumeManager.ReconcilerStatesHasBeenSynced() {
node.Status.VolumesInUse = e.volumeManager.GetVolumesInUse()
} else {
node.Status.VolumesInUse = nil
}
e.volumeManager.MarkVolumesAsReportedInUse(node.Status.VolumesInUse)
klog.Infof("Sync VolumesInUse: %v", node.Status.VolumesInUse)
return nodeStatus, nil
}
func (e *edged) setNodeStatusDaemonEndpoints(node *edgeapi.NodeStatusRequest) {
node.Status.DaemonEndpoints = v1.NodeDaemonEndpoints{
KubeletEndpoint: v1.DaemonEndpoint{
Port: constants.ServerPort,
},
}
}
func (e *edged) setNodeStatusConditions(node *edgeapi.NodeStatusRequest) {
e.setNodeReadyCondition(node)
}
// setNodeReadyCondition is partially come from "k8s.io/kubernetes/pkg/kubelet.setNodeReadyCondition"
func (e *edged) setNodeReadyCondition(node *edgeapi.NodeStatusRequest) {
currentTime := metav1.NewTime(time.Now())
var newNodeReadyCondition v1.NodeCondition
var err error
_, err = e.containerRuntime.Version()
if err != nil {
newNodeReadyCondition = v1.NodeCondition{
Type: v1.NodeReady,
Status: v1.ConditionFalse,
Reason: "EdgeNotReady",
Message: err.Error(),
LastHeartbeatTime: currentTime,
}
} else {
newNodeReadyCondition = v1.NodeCondition{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
Reason: "EdgeReady",
Message: "edge is posting ready status",
LastHeartbeatTime: currentTime,
}
}
readyConditionUpdated := false
for i := range node.Status.Conditions {
if node.Status.Conditions[i].Type == v1.NodeReady {
if node.Status.Conditions[i].Status == newNodeReadyCondition.Status {
newNodeReadyCondition.LastTransitionTime = node.Status.Conditions[i].LastTransitionTime
} else {
newNodeReadyCondition.LastTransitionTime = currentTime
}
node.Status.Conditions[i] = newNodeReadyCondition
readyConditionUpdated = true
break
}
}
if !readyConditionUpdated {
newNodeReadyCondition.LastTransitionTime = currentTime
node.Status.Conditions = append(node.Status.Conditions, newNodeReadyCondition)
}
}
func (e *edged) getNodeInfo() (v1.NodeSystemInfo, error) {
nodeInfo := v1.NodeSystemInfo{}
kernel, err := util.Command("uname", []string{"-r"})
if err != nil {
return nodeInfo, err
}
prettyName, err := util.Command("sh", []string{"-c", `cat /etc/os-release | grep PRETTY_NAME| awk -F '"' '{print$2}'`})
if err != nil {
return nodeInfo, err
}
runtimeVersion, err := e.containerRuntime.Version()
if err != nil {
return nodeInfo, err
}
nodeInfo.ContainerRuntimeVersion = fmt.Sprintf("%s://%s", e.containerRuntimeName, runtimeVersion.String())
nodeInfo.KernelVersion = kernel
nodeInfo.OperatingSystem = runtime.GOOS
nodeInfo.Architecture = runtime.GOARCH
nodeInfo.KubeletVersion = e.version
nodeInfo.OSImage = prettyName
return nodeInfo, nil
}
func (e *edged) setGPUInfo(nodeStatus *edgeapi.NodeStatusRequest) error {
_, err := os.Stat(GPUInfoQueryTool)
if err != nil {
return fmt.Errorf("can not get file in path: %s, err: %v", GPUInfoQueryTool, err)
}
nodeStatus.ExtendResources = make(map[v1.ResourceName][]edgeapi.ExtendResource)
result, err := util.Command("sh", []string{"-c", fmt.Sprintf("%s -L", GPUInfoQueryTool)})
if err != nil {
return err
}
re := regexp.MustCompile(`GPU .*:.*\(.*\)`)
gpuInfos := re.FindAllString(result, -1)
gpuResources := make([]edgeapi.ExtendResource, 0)
gpuRegexp := regexp.MustCompile(`^GPU ([\d]+):(.*)\(.*\)`)
for _, gpuInfo := range gpuInfos {
params := gpuRegexp.FindStringSubmatch(strings.TrimSpace(gpuInfo))
if len(params) != 3 {
klog.Errorf("parse gpu failed, gpuInfo: %v, params: %v", gpuInfo, params)
continue
}
gpuName := params[1]
gpuType := params[2]
result, err = util.Command("sh", []string{"-c", fmt.Sprintf("%s -i %s -a|grep -A 3 \"FB Memory Usage\"| grep Total", GPUInfoQueryTool, gpuName)})
if err != nil {
klog.Errorf("get gpu(%v) memory failed, err: %v", gpuName, err)
continue
}
parts := strings.Split(result, ":")
if len(parts) != 2 {
klog.Errorf("parse gpu(%v) memory failed, parts: %v", gpuName, parts)
continue
}
mem := strings.TrimSpace(strings.Split(strings.TrimSpace(parts[1]), " ")[0])
gpuResource := edgeapi.ExtendResource{}
gpuResource.Name = fmt.Sprintf("nvidia%v", gpuName)
gpuResource.Type = gpuType
gpuResource.Capacity = resource.MustParse(mem + "Mi")
gpuResources = append(gpuResources, gpuResource)
}
nodeStatus.ExtendResources[apis.NvidiaGPUResource] = gpuResources
return nil
}
func (e *edged) getIP() (string, error) {
if nodeIP := config.Config.NodeIP; nodeIP != "" {
return nodeIP, nil
}
hostName, _ := os.Hostname()
if hostName == "" {
hostName = e.nodeName
}
return util.GetLocalIP(hostName)
}
func (e *edged) setMemInfo(total, allocated v1.ResourceList) error {
out, err := ioutil.ReadFile("/proc/meminfo")
if err != nil {
return err
}
matches := regexp.MustCompile(`MemTotal:\s*([0-9]+) kB`).FindSubmatch(out)
if len(matches) != 2 {
return fmt.Errorf("failed to match regexp in output: %q", string(out))
}
m, err := strconv.ParseInt(string(matches[1]), 10, 64)
if err != nil {
return err
}
totalMem := m / 1024
mem := resource.MustParse(strconv.FormatInt(totalMem, 10) + "Mi")
total[v1.ResourceMemory] = mem.DeepCopy()
if mem.Cmp(reservationMemory) > 0 {
mem.Sub(reservationMemory)
}
allocated[v1.ResourceMemory] = mem.DeepCopy()
return nil
}
func (e *edged) setCPUInfo(total, allocated v1.ResourceList) error {
total[v1.ResourceCPU] = resource.MustParse(fmt.Sprintf("%d", runtime.NumCPU()))
allocated[v1.ResourceCPU] = total[v1.ResourceCPU].DeepCopy()
return nil
}
func (e *edged) getDevicePluginResourceCapacity() (v1.ResourceList, v1.ResourceList, []string) {
return e.containerManager.GetDevicePluginResourceCapacity()
}
func (e *edged) getNodePhase() v1.NodePhase {
return v1.NodeRunning
}
func (e *edged) registerNode() error {
node, err := e.initialNode()
if err != nil {
klog.Errorf("Unable to construct v1.Node object for edge: %v", err)
return err
}
e.setInitNode(node)
if !config.Config.RegisterNode {
//when register-node set to false, do not auto register node
klog.Infof("register-node is set to false")
e.registrationCompleted = true
return nil
}
klog.Infof("Attempting to register node %s", e.nodeName)
resource := fmt.Sprintf("%s/%s/%s", e.namespace, model.ResourceTypeNodeStatus, e.nodeName)
nodeInfoMsg := message.BuildMsg(modules.MetaGroup, "", modules.EdgedModuleName, resource, model.InsertOperation, node)
res, err := beehiveContext.SendSync(edgehub.ModuleNameEdgeHub, *nodeInfoMsg, syncMsgRespTimeout)
if err != nil || res.Content != "OK" {
klog.Errorf("register node failed, error: %v", err)
if res.Content != "OK" {
klog.Errorf("response from cloud core: %s", res.Content)
}
return err
}
klog.Infof("Successfully registered node %s", e.nodeName)
e.registrationCompleted = true
return nil
}
func (e *edged) updateNodeStatus() error {
nodeStatus, err := e.getNodeStatusRequest(&initNode)
if err != nil {
klog.Errorf("Unable to construct api.NodeStatusRequest object for edge: %v", err)
return err
}
err = e.metaClient.NodeStatus(e.namespace).Update(e.nodeName, *nodeStatus)
if err != nil {
klog.Errorf("update node failed, error: %v", err)
}
return nil
}
func (e *edged) syncNodeStatus() {
if !e.registrationCompleted {
if err := e.registerNode(); err != nil {
klog.Errorf("Register node failed: %v", err)
return
}
if err := e.updateNodeStatus(); err != nil {
klog.Errorf("Unable to update node status: %v", err)
}
} else {
if err := e.updateNodeStatus(); err != nil {
klog.Errorf("Unable to update node status: %v", err)
}
}
}
| 1 | 19,076 | @guihecheng Thanks for the fixing! Actually this is not the dup codes, when we first registerNode, then update the nodestatus immediately, or the node will keep not ready for 10s. And do you have other ways to refactor it? | kubeedge-kubeedge | go |
@@ -14,6 +14,7 @@
#include "SystemInfo.hpp"
+#include <sys/stat.h>
#ifdef _WIN32
#include <windows.h>
#else | 1 | // Copyright 2021 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "SystemInfo.hpp"
#ifdef _WIN32
#include <windows.h>
#else
#include <pwd.h>
#include <unistd.h>
#endif // _WIN32
#include <fastrtps/types/TypesBase.h>
namespace eprosima {
using ReturnCode_t = fastrtps::types::ReturnCode_t;
/**
 * Read an environment variable.
 *
 * @param env_name  Null-terminated name of the variable; must be non-null and non-empty.
 * @param env_value Output: receives the value pointer from the environment, or nullptr.
 * @return RETCODE_BAD_PARAMETER on invalid arguments, RETCODE_NO_DATA when the
 *         variable is not set, RETCODE_OK otherwise.
 */
ReturnCode_t SystemInfo::get_env(
        const char* env_name,
        const char** env_value)
{
    // Reject null pointers and an empty variable name up front.
    if ((nullptr == env_name) || (nullptr == env_value) || ('\0' == *env_name))
    {
        return ReturnCode_t::RETCODE_BAD_PARAMETER;
    }

    // getenv is flagged as unsafe by MSVC (C4996); this lookup is read-only.
#pragma warning(suppress:4996)
    const char* value = getenv(env_name);
    *env_value = value;

    return (nullptr == value) ? ReturnCode_t::RETCODE_NO_DATA : ReturnCode_t::RETCODE_OK;
}
/**
 * Retrieve the name of the user running the current process.
 *
 * On Windows this queries GetUserName into a fixed-size buffer; on POSIX it
 * resolves the effective uid through getpwuid.
 *
 * @param username Output: set to the user name on success; untouched on failure.
 * @return RETCODE_OK on success, RETCODE_ERROR otherwise.
 */
ReturnCode_t SystemInfo::get_username(
        std::string& username)
{
#ifdef _WIN32
#define INFO_BUFFER_SIZE 32767
    char user[INFO_BUFFER_SIZE];
    DWORD bufCharCount = INFO_BUFFER_SIZE;
    // GetUserName fills `user` and updates bufCharCount with the copied length.
    if (!GetUserName(user, &bufCharCount))
    {
        return ReturnCode_t::RETCODE_ERROR;
    }
    username = user;
    return ReturnCode_t::RETCODE_OK;
#else
    // Effective (not real) uid: reflects privilege changes via setuid binaries.
    uid_t user_id = geteuid();
    struct passwd* pwd = getpwuid(user_id);
    if (pwd != nullptr)
    {
        username = pwd->pw_name;
        // An empty pw_name is treated as failure as well.
        if (!username.empty())
        {
            return ReturnCode_t::RETCODE_OK;
        }
    }
    return ReturnCode_t::RETCODE_ERROR;
#endif // _WIN32
}
} // eprosima
| 1 | 22,915 | How portable is this? (e.g., Windows). I'm relaunching tests of windows because they failed for other reasons. | eProsima-Fast-DDS | cpp |
@@ -7498,10 +7498,13 @@ static void UpdateStateCmdDrawType(layer_data *dev_data, GLOBAL_CB_NODE *cb_stat
cb_state->hasDrawCmd = true;
// Add descriptor image/CIS layouts to CB layout map
- auto &desc_sets = cb_state->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].boundDescriptorSets;
- for (auto &desc : desc_sets) {
- if (desc) {
- desc->UpdateDSImageLayoutState(cb_state);
+ PIPELINE_STATE *pPipe = cb_state->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline_state;
+
+ for (const auto &set_binding_pair : pPipe->active_slots) {
+ uint32_t setIndex = set_binding_pair.first;
+ auto &desc_set = cb_state->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].boundDescriptorSets[setIndex];
+ if (desc_set) {
+ desc_set->UpdateDSImageLayoutState(cb_state);
}
}
} | 1 | /* Copyright (c) 2015-2018 The Khronos Group Inc.
* Copyright (c) 2015-2018 Valve Corporation
* Copyright (c) 2015-2018 LunarG, Inc.
* Copyright (C) 2015-2018 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: Cody Northrop <[email protected]>
* Author: Michael Lentine <[email protected]>
* Author: Tobin Ehlis <[email protected]>
* Author: Chia-I Wu <[email protected]>
* Author: Chris Forbes <[email protected]>
* Author: Mark Lobodzinski <[email protected]>
* Author: Ian Elliott <[email protected]>
* Author: Dave Houlton <[email protected]>
* Author: Dustin Graves <[email protected]>
* Author: Jeremy Hayes <[email protected]>
* Author: Jon Ashburn <[email protected]>
* Author: Karl Schultz <[email protected]>
* Author: Mark Young <[email protected]>
* Author: Mike Schuchardt <[email protected]>
* Author: Mike Weiblen <[email protected]>
* Author: Tony Barbour <[email protected]>
*/
// Allow use of STL min and max functions in Windows
#define NOMINMAX
#define VALIDATION_ERROR_MAP_IMPL
#include <algorithm>
#include <array>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <memory>
#include <mutex>
#include <set>
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <utility>
#include <valarray>
#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_enum_string_helper.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "core_validation.h"
#include "buffer_validation.h"
#include "shader_validation.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"
#include "vk_typemap_helper.h"
#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "CORE_VALIDATION", __VA_ARGS__))
#else
#define LOGCONSOLE(...) \
{ \
printf(__VA_ARGS__); \
printf("\n"); \
}
#endif
// This intentionally includes a cpp file
#include "vk_safe_struct.cpp"
using mutex_t = std::mutex;
using lock_guard_t = std::lock_guard<mutex_t>;
using unique_lock_t = std::unique_lock<mutex_t>;
// These functions are defined *outside* the core_validation namespace as their type
// is also defined outside that namespace
// Compute a hash for this pipeline-layout compatibility definition.
// Only descriptor set layouts with index <= this->set participate: per the
// Vulkan pipeline-layout compatibility rules, compatibility for set N depends
// solely on sets [0, N] and the push-constant ranges.
size_t PipelineLayoutCompatDef::hash() const {
    hash_util::HashCombiner hc;
    // The set number is integral to the CompatDef's distinctiveness
    hc << set << push_constant_ranges.get();
    const auto &descriptor_set_layouts = *set_layouts_id.get();
    for (uint32_t i = 0; i <= set; i++) {
        hc << descriptor_set_layouts[i].get();
    }
    return hc.Value();
}
// Equality for pipeline-layout compatibility definitions: same set index, same
// push-constant ranges, and identical descriptor set layouts for every set up
// to and including `set`. Two defs with different set_layouts_id can still be
// equal if the required prefix of layouts matches element-wise.
bool PipelineLayoutCompatDef::operator==(const PipelineLayoutCompatDef &other) const {
    if ((set != other.set) || (push_constant_ranges != other.push_constant_ranges)) {
        return false;
    }

    if (set_layouts_id == other.set_layouts_id) {
        // if it's the same set_layouts_id, then *any* subset will match
        return true;
    }

    // They aren't exactly the same PipelineLayoutSetLayouts, so we need to check if the required subsets match
    const auto &descriptor_set_layouts = *set_layouts_id.get();
    assert(set < descriptor_set_layouts.size());
    const auto &other_ds_layouts = *other.set_layouts_id.get();
    assert(set < other_ds_layouts.size());
    for (uint32_t i = 0; i <= set; i++) {
        if (descriptor_set_layouts[i] != other_ds_layouts[i]) {
            return false;
        }
    }
    return true;
}
namespace core_validation {
using std::max;
using std::string;
using std::stringstream;
using std::unique_ptr;
using std::unordered_map;
using std::unordered_set;
using std::vector;
// WSI Image Objects bypass usual Image Object creation methods. A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
// 2nd special memory handle used to flag object as unbound from memory
static const VkDeviceMemory MEMORY_UNBOUND = VkDeviceMemory(~((uint64_t)(0)) - 1);
struct instance_layer_data {
VkInstance instance = VK_NULL_HANDLE;
debug_report_data *report_data = nullptr;
vector<VkDebugReportCallbackEXT> logging_callback;
vector<VkDebugUtilsMessengerEXT> logging_messenger;
VkLayerInstanceDispatchTable dispatch_table;
CALL_STATE vkEnumeratePhysicalDevicesState = UNCALLED;
uint32_t physical_devices_count = 0;
CALL_STATE vkEnumeratePhysicalDeviceGroupsState = UNCALLED;
uint32_t physical_device_groups_count = 0;
CHECK_DISABLED disabled = {};
unordered_map<VkPhysicalDevice, PHYSICAL_DEVICE_STATE> physical_device_map;
unordered_map<VkSurfaceKHR, SURFACE_STATE> surface_map;
InstanceExtensions extensions;
uint32_t api_version;
};
struct layer_data {
debug_report_data *report_data = nullptr;
VkLayerDispatchTable dispatch_table;
DeviceExtensions extensions = {};
unordered_set<VkQueue> queues; // All queues under given device
// Layer specific data
unordered_map<VkSampler, unique_ptr<SAMPLER_STATE>> samplerMap;
unordered_map<VkImageView, unique_ptr<IMAGE_VIEW_STATE>> imageViewMap;
unordered_map<VkImage, unique_ptr<IMAGE_STATE>> imageMap;
unordered_map<VkBufferView, unique_ptr<BUFFER_VIEW_STATE>> bufferViewMap;
unordered_map<VkBuffer, unique_ptr<BUFFER_STATE>> bufferMap;
unordered_map<VkPipeline, unique_ptr<PIPELINE_STATE>> pipelineMap;
unordered_map<VkCommandPool, COMMAND_POOL_NODE> commandPoolMap;
unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_STATE *> descriptorPoolMap;
unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
unordered_map<VkDescriptorSetLayout, std::shared_ptr<cvdescriptorset::DescriptorSetLayout>> descriptorSetLayoutMap;
unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
unordered_map<VkFence, FENCE_NODE> fenceMap;
unordered_map<VkQueue, QUEUE_STATE> queueMap;
unordered_map<VkEvent, EVENT_STATE> eventMap;
unordered_map<QueryObject, bool> queryToStateMap;
unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
unordered_map<VkFramebuffer, unique_ptr<FRAMEBUFFER_STATE>> frameBufferMap;
unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
unordered_map<VkRenderPass, std::shared_ptr<RENDER_PASS_STATE>> renderPassMap;
unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
unordered_map<VkDescriptorUpdateTemplateKHR, unique_ptr<TEMPLATE_STATE>> desc_template_map;
unordered_map<VkSwapchainKHR, std::unique_ptr<SWAPCHAIN_NODE>> swapchainMap;
GlobalQFOTransferBarrierMap<VkImageMemoryBarrier> qfo_release_image_barrier_map;
GlobalQFOTransferBarrierMap<VkBufferMemoryBarrier> qfo_release_buffer_barrier_map;
VkDevice device = VK_NULL_HANDLE;
VkPhysicalDevice physical_device = VK_NULL_HANDLE;
instance_layer_data *instance_data = nullptr; // from device to enclosing instance
DeviceFeatures enabled_features = {};
// Device specific data
PHYS_DEV_PROPERTIES_NODE phys_dev_properties = {};
VkPhysicalDeviceMemoryProperties phys_dev_mem_props = {};
VkPhysicalDeviceProperties phys_dev_props = {};
// Device extension properties -- storing properties gathered from VkPhysicalDeviceProperties2KHR::pNext chain
struct DeviceExtensionProperties {
uint32_t max_push_descriptors; // from VkPhysicalDevicePushDescriptorPropertiesKHR::maxPushDescriptors
VkPhysicalDeviceDescriptorIndexingPropertiesEXT descriptor_indexing_props;
VkPhysicalDeviceShadingRateImagePropertiesNV shading_rate_image_props;
VkPhysicalDeviceMeshShaderPropertiesNV mesh_shader_props;
VkPhysicalDeviceInlineUniformBlockPropertiesEXT inline_uniform_block_props;
};
DeviceExtensionProperties phys_dev_ext_props = {};
bool external_sync_warning = false;
uint32_t api_version = 0;
};
// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;
static unordered_map<void *, instance_layer_data *> instance_layer_data_map;
static uint32_t loader_layer_if_version = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
static const VkLayerProperties global_layer = {
"VK_LAYER_LUNARG_core_validation",
VK_LAYER_API_VERSION,
1,
"LunarG Validation Layer",
};
static const VkExtensionProperties device_extensions[] = {
{VK_EXT_VALIDATION_CACHE_EXTENSION_NAME, VK_EXT_VALIDATION_CACHE_SPEC_VERSION},
};
// Warn (to console, since no debug callback exists yet at create time) when
// VK_LAYER_GOOGLE_unique_objects is enabled before this layer in the layer
// list: unique_objects must be activated after core_validation.
// NOTE(review): the inner strcmp checks ppEnabledLayerNames[0] on every
// iteration rather than [i] — presumably intended to test only the first
// layer; confirm against the loader's layer-ordering semantics.
template <class TCreateInfo>
void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[0], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.", global_layer.layerName);
        }
    }
}
// TODO : This can be much smarter, using separate locks for separate global data
static mutex_t global_lock;
// Get the global map of pending queue-family-ownership (QFO) releases.
// Two overloads, selected by the barrier Tag type: one for image memory
// barriers, one for buffer memory barriers. Both return the device-level map.
GlobalQFOTransferBarrierMap<VkImageMemoryBarrier> &GetGlobalQFOReleaseBarrierMap(
    layer_data *dev_data, const QFOTransferBarrier<VkImageMemoryBarrier>::Tag &type_tag) {
    return dev_data->qfo_release_image_barrier_map;
}

// Buffer-barrier overload; see the image overload above.
GlobalQFOTransferBarrierMap<VkBufferMemoryBarrier> &GetGlobalQFOReleaseBarrierMap(
    layer_data *dev_data, const QFOTransferBarrier<VkBufferMemoryBarrier>::Tag &type_tag) {
    return dev_data->qfo_release_buffer_barrier_map;
}
// Get the image view state for a given framebuffer attachment.
// `index` must be a valid attachment index for the framebuffer (asserted).
// When the attachment-state cache is compiled in, the cached pointer is used;
// otherwise the view is looked up from the framebuffer's create info.
IMAGE_VIEW_STATE *GetAttachmentImageViewState(layer_data *dev_data, FRAMEBUFFER_STATE *framebuffer, uint32_t index) {
    assert(framebuffer && (index < framebuffer->createInfo.attachmentCount));
#ifdef FRAMEBUFFER_ATTACHMENT_STATE_CACHE
    return framebuffer->attachments[index].view_state;
#else
    const VkImageView &image_view = framebuffer->createInfo.pAttachments[index];
    return GetImageViewState(dev_data, image_view);
#endif
}
// Look up the IMAGE_VIEW_STATE tracked for image_view; nullptr when untracked.
IMAGE_VIEW_STATE *GetImageViewState(const layer_data *dev_data, VkImageView image_view) {
    const auto found = dev_data->imageViewMap.find(image_view);
    return (found == dev_data->imageViewMap.end()) ? nullptr : found->second.get();
}

// Look up the SAMPLER_STATE tracked for sampler; nullptr when untracked.
SAMPLER_STATE *GetSamplerState(const layer_data *dev_data, VkSampler sampler) {
    const auto found = dev_data->samplerMap.find(sampler);
    return (found == dev_data->samplerMap.end()) ? nullptr : found->second.get();
}

// Look up the IMAGE_STATE tracked for image; nullptr when untracked.
IMAGE_STATE *GetImageState(const layer_data *dev_data, VkImage image) {
    const auto found = dev_data->imageMap.find(image);
    return (found == dev_data->imageMap.end()) ? nullptr : found->second.get();
}

// Look up the BUFFER_STATE tracked for buffer; nullptr when untracked.
BUFFER_STATE *GetBufferState(const layer_data *dev_data, VkBuffer buffer) {
    const auto found = dev_data->bufferMap.find(buffer);
    return (found == dev_data->bufferMap.end()) ? nullptr : found->second.get();
}

// Look up the SWAPCHAIN_NODE tracked for swapchain; nullptr when untracked.
SWAPCHAIN_NODE *GetSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
    const auto found = dev_data->swapchainMap.find(swapchain);
    return (found == dev_data->swapchainMap.end()) ? nullptr : found->second.get();
}

// Look up the BUFFER_VIEW_STATE tracked for buffer_view; nullptr when untracked.
BUFFER_VIEW_STATE *GetBufferViewState(const layer_data *dev_data, VkBufferView buffer_view) {
    const auto found = dev_data->bufferViewMap.find(buffer_view);
    return (found == dev_data->bufferViewMap.end()) ? nullptr : found->second.get();
}
// Look up the FENCE_NODE tracked for fence; nullptr when untracked.
FENCE_NODE *GetFenceNode(layer_data *dev_data, VkFence fence) {
    const auto found = dev_data->fenceMap.find(fence);
    return (found == dev_data->fenceMap.end()) ? nullptr : &found->second;
}

// Look up the EVENT_STATE tracked for event; nullptr when untracked.
EVENT_STATE *GetEventNode(layer_data *dev_data, VkEvent event) {
    const auto found = dev_data->eventMap.find(event);
    return (found == dev_data->eventMap.end()) ? nullptr : &found->second;
}

// Look up the QUERY_POOL_NODE tracked for query_pool; nullptr when untracked.
QUERY_POOL_NODE *GetQueryPoolNode(layer_data *dev_data, VkQueryPool query_pool) {
    const auto found = dev_data->queryPoolMap.find(query_pool);
    return (found == dev_data->queryPoolMap.end()) ? nullptr : &found->second;
}

// Look up the QUEUE_STATE tracked for queue; nullptr when untracked.
QUEUE_STATE *GetQueueState(layer_data *dev_data, VkQueue queue) {
    const auto found = dev_data->queueMap.find(queue);
    return (found == dev_data->queueMap.end()) ? nullptr : &found->second;
}

// Look up the SEMAPHORE_NODE tracked for semaphore; nullptr when untracked.
SEMAPHORE_NODE *GetSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
    const auto found = dev_data->semaphoreMap.find(semaphore);
    return (found == dev_data->semaphoreMap.end()) ? nullptr : &found->second;
}

// Look up the COMMAND_POOL_NODE tracked for pool; nullptr when untracked.
COMMAND_POOL_NODE *GetCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
    const auto found = dev_data->commandPoolMap.find(pool);
    return (found == dev_data->commandPoolMap.end()) ? nullptr : &found->second;
}

// Look up the PHYSICAL_DEVICE_STATE tracked for phys; nullptr when untracked.
PHYSICAL_DEVICE_STATE *GetPhysicalDeviceState(instance_layer_data *instance_data, VkPhysicalDevice phys) {
    const auto found = instance_data->physical_device_map.find(phys);
    return (found == instance_data->physical_device_map.end()) ? nullptr : &found->second;
}

// Look up the SURFACE_STATE tracked for surface; nullptr when untracked.
SURFACE_STATE *GetSurfaceState(instance_layer_data *instance_data, VkSurfaceKHR surface) {
    const auto found = instance_data->surface_map.find(surface);
    return (found == instance_data->surface_map.end()) ? nullptr : &found->second;
}
// Resolve the BINDABLE (image or buffer state) behind a raw handle. Only
// images and buffers carry memory bindings; any other object type yields
// nullptr, as does an untracked handle.
static BINDABLE *GetObjectMemBinding(layer_data *dev_data, uint64_t handle, VulkanObjectType type) {
    if (kVulkanObjectTypeImage == type) {
        return GetImageState(dev_data, VkImage(handle));
    }
    if (kVulkanObjectTypeBuffer == type) {
        return GetBufferState(dev_data, VkBuffer(handle));
    }
    return nullptr;
}
// prototype
GLOBAL_CB_NODE *GetCBNode(layer_data const *, const VkCommandBuffer);
// Return the DEVICE_MEM_INFO tracked for mem, or nullptr when untracked.
// Calls to this function should be wrapped in mutex.
DEVICE_MEM_INFO *GetMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
    const auto found = dev_data->memObjMap.find(mem);
    return (found == dev_data->memObjMap.end()) ? nullptr : found->second.get();
}
// Track a new VkDeviceMemory allocation in the device's memory-object map.
// `object` is the dispatchable object recorded for later error reporting.
// If the allocation chain carries VkMemoryDedicatedAllocateInfoKHR, the
// dedicated resource (buffer or image) is remembered on the mem info.
//
// Improvement over the original: the DEVICE_MEM_INFO is owned by a unique_ptr
// from construction and only moved into the map after it is fully populated,
// eliminating the raw-owning-pointer window in which a throwing map insertion
// would have leaked the allocation.
static void AddMemObjInfo(layer_data *dev_data, void *object, const VkDeviceMemory mem, const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    auto mem_info = unique_ptr<DEVICE_MEM_INFO>(new DEVICE_MEM_INFO(object, mem, pAllocateInfo));
    auto dedicated = lvl_find_in_chain<VkMemoryDedicatedAllocateInfoKHR>(pAllocateInfo->pNext);
    if (dedicated) {
        mem_info->is_dedicated = true;
        mem_info->dedicated_buffer = dedicated->buffer;
        mem_info->dedicated_image = dedicated->image;
    }
    dev_data->memObjMap[mem] = std::move(mem_info);
}
// Create binding link between given sampler and command buffer node.
// The link is bidirectional: the sampler remembers the CB, and the CB records
// the sampler handle so both sides can be cleaned up on destruction.
void AddCommandBufferBindingSampler(GLOBAL_CB_NODE *cb_node, SAMPLER_STATE *sampler_state) {
    sampler_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert({HandleToUint64(sampler_state->sampler), kVulkanObjectTypeSampler});
}

// Create binding link between given image node and command buffer node.
// Also links every memory object currently bound to the image to the CB.
// WSI swapchain images (flagged with the swap-chain sentinel memory key)
// bypass normal memory tracking and are skipped entirely.
void AddCommandBufferBindingImage(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *image_state) {
    // Skip validation if this image was created through WSI
    if (image_state->binding.mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        // First update CB binding in MemObj mini CB list
        for (auto mem_binding : image_state->GetBoundMemory()) {
            DEVICE_MEM_INFO *pMemInfo = GetMemObjInfo(dev_data, mem_binding);
            if (pMemInfo) {
                pMemInfo->cb_bindings.insert(cb_node);
                // Now update CBInfo's Mem reference list
                cb_node->memObjs.insert(mem_binding);
            }
        }
        // Now update cb binding for image
        cb_node->object_bindings.insert({HandleToUint64(image_state->image), kVulkanObjectTypeImage});
        image_state->cb_bindings.insert(cb_node);
    }
}

// Create binding link between given image view node and its image with command buffer node.
// Binds the view itself, then cascades to the underlying image (and its memory).
void AddCommandBufferBindingImageView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_VIEW_STATE *view_state) {
    // First add bindings for imageView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert({HandleToUint64(view_state->image_view), kVulkanObjectTypeImageView});
    auto image_state = GetImageState(dev_data, view_state->create_info.image);
    // Add bindings for image within imageView
    if (image_state) {
        AddCommandBufferBindingImage(dev_data, cb_node, image_state);
    }
}

// Create binding link between given buffer node and command buffer node.
// Also links every memory object currently bound to the buffer to the CB.
void AddCommandBufferBindingBuffer(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_STATE *buffer_state) {
    // First update CB binding in MemObj mini CB list
    for (auto mem_binding : buffer_state->GetBoundMemory()) {
        DEVICE_MEM_INFO *pMemInfo = GetMemObjInfo(dev_data, mem_binding);
        if (pMemInfo) {
            pMemInfo->cb_bindings.insert(cb_node);
            // Now update CBInfo's Mem reference list
            cb_node->memObjs.insert(mem_binding);
        }
    }
    // Now update cb binding for buffer
    cb_node->object_bindings.insert({HandleToUint64(buffer_state->buffer), kVulkanObjectTypeBuffer});
    buffer_state->cb_bindings.insert(cb_node);
}

// Create binding link between given buffer view node and its buffer with command buffer node.
// Binds the view itself, then cascades to the underlying buffer (and its memory).
void AddCommandBufferBindingBufferView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_VIEW_STATE *view_state) {
    // First add bindings for bufferView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert({HandleToUint64(view_state->buffer_view), kVulkanObjectTypeBufferView});
    auto buffer_state = GetBufferState(dev_data, view_state->create_info.buffer);
    // Add bindings for buffer within bufferView
    if (buffer_state) {
        AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_state);
    }
}
// For every memory object referenced by cb_node, drop the back-reference to
// this command buffer, then clear the command buffer's own memory list.
// A null cb_node or an empty memObjs set is a no-op.
static void ClearCmdBufAndMemReferences(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
    if (!cb_node || cb_node->memObjs.empty()) {
        return;
    }
    for (auto mem : cb_node->memObjs) {
        DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
        if (mem_info) {
            mem_info->cb_bindings.erase(cb_node);
        }
    }
    cb_node->memObjs.clear();
}
// Clear a single object binding from given memory object.
// Removes the (handle, type) entry from the memory object's binding list;
// a no-op when the memory object is no longer tracked.
static void ClearMemoryObjectBinding(layer_data *dev_data, uint64_t handle, VulkanObjectType type, VkDeviceMemory mem) {
    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
    // This obj is bound to a memory object. Remove the reference to this object in that memory object's list
    if (mem_info) {
        mem_info->obj_bindings.erase({handle, type});
    }
}

// ClearMemoryObjectBindings clears the binding of objects to memory
// For the given object it pulls the memory bindings and makes sure that the bindings
// no longer refer to the object being cleared. This occurs when objects are destroyed.
// Non-sparse objects have exactly one binding; sparse objects may have many.
void ClearMemoryObjectBindings(layer_data *dev_data, uint64_t handle, VulkanObjectType type) {
    BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
    if (mem_binding) {
        if (!mem_binding->sparse) {
            ClearMemoryObjectBinding(dev_data, handle, type, mem_binding->binding.mem);
        } else {  // Sparse, clear all bindings
            for (auto &sparse_mem_binding : mem_binding->sparse_bindings) {
                ClearMemoryObjectBinding(dev_data, handle, type, sparse_mem_binding.mem);
            }
        }
    }
}
// For given mem object, verify that it is not null or UNBOUND; if it is, report error.
// Distinguishes "never bound" (VK_NULL_HANDLE) from "bound memory was since
// freed" (the MEMORY_UNBOUND sentinel). Returns the skip value from log_msg.
bool VerifyBoundMemoryIsValid(const layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, const char *api_name,
                              const char *type_name, std::string error_code) {
    bool result = false;
    if (VK_NULL_HANDLE == mem) {
        result =
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle, error_code,
                    "%s: Vk%s object 0x%" PRIx64 " used with no memory bound. Memory should be bound by calling vkBind%sMemory().",
                    api_name, type_name, handle, type_name);
    } else if (MEMORY_UNBOUND == mem) {
        result =
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle, error_code,
                    "%s: Vk%s object 0x%" PRIx64
                    " used with no memory bound and previously bound memory was freed. Memory must not be freed prior to this "
                    "operation.",
                    api_name, type_name, handle);
    }
    return result;
}

// Check to see if memory was ever bound to this image.
// Sparse-binding images are exempt: their memory is bound per-region via
// vkQueueBindSparse rather than vkBindImageMemory.
bool ValidateMemoryIsBoundToImage(const layer_data *dev_data, const IMAGE_STATE *image_state, const char *api_name,
                                  const std::string &error_code) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, image_state->binding.mem, HandleToUint64(image_state->image), api_name, "Image",
                                          error_code);
    }
    return result;
}

// Check to see if memory was bound to this buffer.
// Sparse-binding buffers are exempt for the same reason as sparse images.
bool ValidateMemoryIsBoundToBuffer(const layer_data *dev_data, const BUFFER_STATE *buffer_state, const char *api_name,
                                   const std::string &error_code) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(buffer_state->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, buffer_state->binding.mem, HandleToUint64(buffer_state->buffer), api_name,
                                          "Buffer", error_code);
    }
    return result;
}
// SetMemBinding is used to establish immutable, non-sparse binding between a single image/buffer object and memory object.
// Corresponding valid usage checks are in ValidateSetMemBinding().
// Updates the bindable's binding record (mem/offset/size), refreshes its cached
// bound-memory set, and registers the (handle, type) pair with the memory
// object's own binding list when mem is a real handle.
static void SetMemBinding(layer_data *dev_data, VkDeviceMemory mem, BINDABLE *mem_binding, VkDeviceSize memory_offset,
                          uint64_t handle, VulkanObjectType type, const char *apiName) {
    assert(mem_binding);
    mem_binding->binding.mem = mem;
    mem_binding->UpdateBoundMemorySet();  // force recreation of cached set
    mem_binding->binding.offset = memory_offset;
    mem_binding->binding.size = mem_binding->requirements.size;

    if (mem != VK_NULL_HANDLE) {
        DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
        if (mem_info) {
            mem_info->obj_bindings.insert({handle, type});
            // For image objects, make sure default memory state is correctly set
            // TODO : What's the best/correct way to handle this?
            if (kVulkanObjectTypeImage == type) {
                auto const image_state = reinterpret_cast<const IMAGE_STATE *>(mem_binding);
                if (image_state) {
                    VkImageCreateInfo ici = image_state->createInfo;
                    if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                        // TODO:: More memory state transition stuff.
                    }
                }
            }
        }
    }
}
// Valid usage checks for a call to SetMemBinding().
// For NULL mem case, output warning
// Make sure given object is in global object map
// IF a previous binding existed, output validation error
// Otherwise, add reference from objectInfo to memoryInfo
// Add reference off of objInfo
// Errors reported: binding memory to a sparse-created resource, re-binding an
// already-bound resource, and re-binding a resource whose previous memory was
// freed (bindings are immutable in Vulkan). Returns true when validation says
// the call should be skipped.
// TODO: We may need to refactor or pass in multiple valid usage statements to handle multiple valid usage conditions.
static bool ValidateSetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VulkanObjectType type,
                                  const char *apiName) {
    bool skip = false;
    // It's an error to bind an object to NULL memory
    if (mem != VK_NULL_HANDLE) {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        if (mem_binding->sparse) {
            // Per-type VUID/handle-name selection; only image and buffer reach here.
            std::string error_code = "VUID-vkBindImageMemory-image-01045";
            const char *handle_type = "IMAGE";
            if (type == kVulkanObjectTypeBuffer) {
                error_code = "VUID-vkBindBufferMemory-buffer-01030";
                handle_type = "BUFFER";
            } else {
                assert(type == kVulkanObjectTypeImage);
            }
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            HandleToUint64(mem), error_code,
                            "In %s, attempting to bind memory (0x%" PRIx64 ") to object (0x%" PRIx64
                            ") which was created with sparse memory flags (VK_%s_CREATE_SPARSE_*_BIT).",
                            apiName, HandleToUint64(mem), handle, handle_type);
        }
        DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
        if (mem_info) {
            DEVICE_MEM_INFO *prev_binding = GetMemObjInfo(dev_data, mem_binding->binding.mem);
            if (prev_binding) {
                std::string error_code = "VUID-vkBindImageMemory-image-01044";
                if (type == kVulkanObjectTypeBuffer) {
                    error_code = "VUID-vkBindBufferMemory-buffer-01029";
                } else {
                    assert(type == kVulkanObjectTypeImage);
                }
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                HandleToUint64(mem), error_code,
                                "In %s, attempting to bind memory (0x%" PRIx64 ") to object (0x%" PRIx64
                                ") which has already been bound to mem object 0x%" PRIx64 ".",
                                apiName, HandleToUint64(mem), handle, HandleToUint64(prev_binding->mem));
            } else if (mem_binding->binding.mem == MEMORY_UNBOUND) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                HandleToUint64(mem), kVUID_Core_MemTrack_RebindObject,
                                "In %s, attempting to bind memory (0x%" PRIx64 ") to object (0x%" PRIx64
                                ") which was previous bound to memory that has since been freed. Memory bindings are immutable in "
                                "Vulkan so this attempt to bind to new memory is not allowed.",
                                apiName, HandleToUint64(mem), handle);
            }
        }
    }
    return skip;
}
// For NULL mem case, clear any previous binding Else...
// Make sure given object is in its object map
// IF a previous binding existed, update binding
// Add reference from objectInfo to memoryInfo
// Add reference off of object's binding info
// Return VK_TRUE if addition is successful, VK_FALSE otherwise
// NOTE(review): the function currently always returns VK_FALSE — `skip` is
// never set; confirm whether callers rely on the return value at all.
static bool SetSparseMemBinding(layer_data *dev_data, MEM_BINDING binding, uint64_t handle, VulkanObjectType type) {
    bool skip = VK_FALSE;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (binding.mem == VK_NULL_HANDLE) {
        // TODO : This should cause the range of the resource to be unbound according to spec
    } else {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        if (mem_binding) {  // Invalid handles are reported by object tracker, but Get returns NULL for them, so avoid SEGV here
            assert(mem_binding->sparse);
            DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, binding.mem);
            if (mem_info) {
                mem_info->obj_bindings.insert({handle, type});
                // Need to set mem binding for this object
                mem_binding->sparse_bindings.insert(binding);
                mem_binding->UpdateBoundMemorySet();
            }
        }
    }
    return skip;
}
// Check object status for selected flag state.
// Logs fail_msg (with the given severity and VUID msg_code) when NONE of the
// bits in status_mask are set on the command buffer; returns the skip value.
static bool ValidateStatus(layer_data *dev_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
                           const char *fail_msg, std::string const msg_code) {
    if (!(pNode->status & status_mask)) {
        return log_msg(dev_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                       HandleToUint64(pNode->commandBuffer), msg_code, "command buffer object 0x%" PRIx64 ": %s..",
                       HandleToUint64(pNode->commandBuffer), fail_msg);
    }
    return false;
}
// Look up the PIPELINE_STATE tracked for pipeline; nullptr when untracked.
static PIPELINE_STATE *GetPipelineState(layer_data const *dev_data, VkPipeline pipeline) {
    const auto found = dev_data->pipelineMap.find(pipeline);
    return (found == dev_data->pipelineMap.end()) ? nullptr : found->second.get();
}

// Look up the RENDER_PASS_STATE tracked for renderpass; nullptr when untracked.
RENDER_PASS_STATE *GetRenderPassState(layer_data const *dev_data, VkRenderPass renderpass) {
    const auto found = dev_data->renderPassMap.find(renderpass);
    return (found == dev_data->renderPassMap.end()) ? nullptr : found->second.get();
}

// Same lookup as GetRenderPassState, but returns a shared_ptr so callers can
// extend the state's lifetime; empty pointer when untracked.
std::shared_ptr<RENDER_PASS_STATE> GetRenderPassStateSharedPtr(layer_data const *dev_data, VkRenderPass renderpass) {
    const auto found = dev_data->renderPassMap.find(renderpass);
    return (found == dev_data->renderPassMap.end()) ? nullptr : found->second;
}

// Look up the FRAMEBUFFER_STATE tracked for framebuffer; nullptr when untracked.
FRAMEBUFFER_STATE *GetFramebufferState(const layer_data *dev_data, VkFramebuffer framebuffer) {
    const auto found = dev_data->frameBufferMap.find(framebuffer);
    return (found == dev_data->frameBufferMap.end()) ? nullptr : found->second.get();
}

// Look up the shared descriptor-set-layout definition for dsLayout; empty
// pointer when untracked.
std::shared_ptr<cvdescriptorset::DescriptorSetLayout const> const GetDescriptorSetLayout(layer_data const *dev_data,
                                                                                         VkDescriptorSetLayout dsLayout) {
    const auto found = dev_data->descriptorSetLayoutMap.find(dsLayout);
    return (found == dev_data->descriptorSetLayoutMap.end()) ? nullptr : found->second;
}

// Look up the PIPELINE_LAYOUT_NODE tracked for pipeLayout; nullptr when untracked.
static PIPELINE_LAYOUT_NODE const *GetPipelineLayout(layer_data const *dev_data, VkPipelineLayout pipeLayout) {
    const auto found = dev_data->pipelineLayoutMap.find(pipeLayout);
    return (found == dev_data->pipelineLayoutMap.end()) ? nullptr : &found->second;
}

// Look up the shader_module state tracked for module; nullptr when untracked.
shader_module const *GetShaderModuleState(layer_data const *dev_data, VkShaderModule module) {
    const auto found = dev_data->shaderModuleMap.find(module);
    return (found == dev_data->shaderModuleMap.end()) ? nullptr : found->second.get();
}
// Return true if for a given PSO, the given state enum is dynamic, else return false
static bool IsDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) {
if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) return true;
}
}
return false;
}
// Validate state stored as flags at time of draw call
// Check that each piece of dynamic state the bound pipeline requires has actually been set on this
// command buffer (tracked as CBSTATUS_* flags set by the vkCmdSet* entry points), and that an index
// buffer is bound when an indexed draw is being validated. Returns true if any required state is
// missing; each miss is logged as an error under msg_code.
static bool ValidateDrawStateFlags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe, bool indexed,
                                   std::string const msg_code) {
    bool result = false;
    // Dynamic line width only matters when line primitives reach the rasterizer
    if (pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_LIST ||
        pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP) {
        result |= ValidateStatus(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 "Dynamic line width state not set for this command buffer", msg_code);
    }
    // Depth bias only matters when the pipeline enables it
    if (pPipe->graphicsPipelineCI.pRasterizationState &&
        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
        result |= ValidateStatus(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 "Dynamic depth bias state not set for this command buffer", msg_code);
    }
    if (pPipe->blendConstantsEnabled) {
        result |= ValidateStatus(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 "Dynamic blend constants state not set for this command buffer", msg_code);
    }
    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
        result |= ValidateStatus(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 "Dynamic depth bounds state not set for this command buffer", msg_code);
    }
    // A stencil test needs all three pieces of dynamic stencil state
    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
        result |= ValidateStatus(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 "Dynamic stencil read mask state not set for this command buffer", msg_code);
        result |= ValidateStatus(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 "Dynamic stencil write mask state not set for this command buffer", msg_code);
        result |= ValidateStatus(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 "Dynamic stencil reference state not set for this command buffer", msg_code);
    }
    if (indexed) {
        result |= ValidateStatus(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 "Index buffer object not bound to this command buffer when Indexed Draw attempted", msg_code);
    }
    return result;
}
// Emit the common "attachments are not compatible" error used by the render pass compatibility
// checks below. primary_attach/secondary_attach are attachment indices into rp1_state/rp2_state
// respectively; msg describes the specific mismatch. Returns the log_msg result.
static bool LogInvalidAttachmentMessage(layer_data const *dev_data, const char *type1_string, const RENDER_PASS_STATE *rp1_state,
                                        const char *type2_string, const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach,
                                        uint32_t secondary_attach, const char *msg, const char *caller, std::string error_code) {
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                   HandleToUint64(rp1_state->renderPass), error_code,
                   "%s: RenderPasses incompatible between %s w/ renderPass 0x%" PRIx64 " and %s w/ renderPass 0x%" PRIx64
                   " Attachment %u is not compatible with %u: %s.",
                   caller, type1_string, HandleToUint64(rp1_state->renderPass), type2_string, HandleToUint64(rp2_state->renderPass),
                   primary_attach, secondary_attach, msg);
}
// Compare a single attachment reference from two render passes for compatibility: both unused is
// always compatible; one unused while the other is not is an error; otherwise format, sample count,
// and flags must all match. Out-of-range indices are normalized to VK_ATTACHMENT_UNUSED first.
// Returns true if any incompatibility was logged.
static bool ValidateAttachmentCompatibility(layer_data const *dev_data, const char *type1_string,
                                            const RENDER_PASS_STATE *rp1_state, const char *type2_string,
                                            const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach, uint32_t secondary_attach,
                                            const char *caller, std::string error_code) {
    bool skip = false;
    const auto &primaryPassCI = rp1_state->createInfo;
    const auto &secondaryPassCI = rp2_state->createInfo;
    // Treat out-of-range attachment indices as "unused" rather than reading past pAttachments
    if (primaryPassCI.attachmentCount <= primary_attach) {
        primary_attach = VK_ATTACHMENT_UNUSED;
    }
    if (secondaryPassCI.attachmentCount <= secondary_attach) {
        secondary_attach = VK_ATTACHMENT_UNUSED;
    }
    if (primary_attach == VK_ATTACHMENT_UNUSED && secondary_attach == VK_ATTACHMENT_UNUSED) {
        return skip;
    }
    if (primary_attach == VK_ATTACHMENT_UNUSED) {
        skip |= LogInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
                                            secondary_attach, "The first is unused while the second is not.", caller, error_code);
        return skip;
    }
    if (secondary_attach == VK_ATTACHMENT_UNUSED) {
        skip |= LogInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
                                            secondary_attach, "The second is unused while the first is not.", caller, error_code);
        return skip;
    }
    // Both attachments are used: their descriptions must agree on format, samples, and flags
    if (primaryPassCI.pAttachments[primary_attach].format != secondaryPassCI.pAttachments[secondary_attach].format) {
        skip |= LogInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
                                            secondary_attach, "They have different formats.", caller, error_code);
    }
    if (primaryPassCI.pAttachments[primary_attach].samples != secondaryPassCI.pAttachments[secondary_attach].samples) {
        skip |= LogInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
                                            secondary_attach, "They have different samples.", caller, error_code);
    }
    if (primaryPassCI.pAttachments[primary_attach].flags != secondaryPassCI.pAttachments[secondary_attach].flags) {
        skip |= LogInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
                                            secondary_attach, "They have different flags.", caller, error_code);
    }
    return skip;
}
// Compare subpass "subpass" of two render passes attachment-by-attachment: every input, color,
// resolve, and depth/stencil attachment reference is checked via ValidateAttachmentCompatibility().
// Iteration runs to the max of the two counts, with missing entries treated as VK_ATTACHMENT_UNUSED.
// Returns true if any incompatibility was logged.
static bool ValidateSubpassCompatibility(layer_data const *dev_data, const char *type1_string, const RENDER_PASS_STATE *rp1_state,
                                         const char *type2_string, const RENDER_PASS_STATE *rp2_state, const int subpass,
                                         const char *caller, std::string error_code) {
    bool skip = false;
    const auto &primary_desc = rp1_state->createInfo.pSubpasses[subpass];
    const auto &secondary_desc = rp2_state->createInfo.pSubpasses[subpass];
    // Input attachments
    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.inputAttachmentCount) {
            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
        }
        if (i < secondary_desc.inputAttachmentCount) {
            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
        }
        skip |= ValidateAttachmentCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_input_attach,
                                                secondary_input_attach, caller, error_code);
    }
    // Color attachments, plus the optional resolve attachment paired with each color slot
    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount) {
            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount) {
            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
        }
        skip |= ValidateAttachmentCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_color_attach,
                                                secondary_color_attach, caller, error_code);
        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
        }
        skip |= ValidateAttachmentCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_resolve_attach,
                                                secondary_resolve_attach, caller, error_code);
    }
    // Depth/stencil attachment (pointer may be null on either side, meaning unused)
    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
    if (primary_desc.pDepthStencilAttachment) {
        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
    }
    if (secondary_desc.pDepthStencilAttachment) {
        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
    }
    skip |= ValidateAttachmentCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_depthstencil_attach,
                                            secondary_depthstencil_attach, caller, error_code);
    return skip;
}
// Verify that given renderPass CreateInfo for primary and secondary command buffers are compatible.
// This function deals directly with the CreateInfo, there are overloaded versions below that can take the renderPass handle and
// will then feed into this function
static bool ValidateRenderPassCompatibility(layer_data const *dev_data, const char *type1_string,
                                            const RENDER_PASS_STATE *rp1_state, const char *type2_string,
                                            const RENDER_PASS_STATE *rp2_state, const char *caller, std::string error_code) {
    bool skip = false;
    // Subpass counts must match before per-subpass comparison makes sense
    if (rp1_state->createInfo.subpassCount != rp2_state->createInfo.subpassCount) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                        HandleToUint64(rp1_state->renderPass), error_code,
                        "%s: RenderPasses incompatible between %s w/ renderPass 0x%" PRIx64
                        " with a subpassCount of %u and %s w/ renderPass 0x%" PRIx64 " with a subpassCount of %u.",
                        caller, type1_string, HandleToUint64(rp1_state->renderPass), rp1_state->createInfo.subpassCount,
                        type2_string, HandleToUint64(rp2_state->renderPass), rp2_state->createInfo.subpassCount);
    } else {
        // Counts match: every subpass pair must be individually compatible
        for (uint32_t i = 0; i < rp1_state->createInfo.subpassCount; ++i) {
            skip |= ValidateSubpassCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, i, caller, error_code);
        }
    }
    return skip;
}
// Return Set node ptr for specified set or else NULL
cvdescriptorset::DescriptorSet *GetSetNode(const layer_data *dev_data, VkDescriptorSet set) {
    const auto entry = dev_data->setMap.find(set);
    return (entry == dev_data->setMap.end()) ? NULL : entry->second;
}
// For given pipeline, return number of MSAA samples, or one if MSAA disabled
static VkSampleCountFlagBits GetNumSamples(PIPELINE_STATE const *pipe) {
    const auto *ms_state = pipe->graphicsPipelineCI.pMultisampleState;
    // Only trust the struct when present and correctly tagged; otherwise report single-sampled
    if ((ms_state != NULL) && (VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == ms_state->sType)) {
        return ms_state->rasterizationSamples;
    }
    return VK_SAMPLE_COUNT_1_BIT;
}
// Write the indices of the set bits of "bits" to stream "s" as a comma-separated list
// (e.g. 0x0B -> "0,1,3"). Emits nothing for bits == 0.
// Fix: use an unsigned mask (1u << i). The original "1 << i" shifts a signed int, which is
// undefined behavior for i == 31 (shifting into the sign bit) in C++ before C++20.
static void ListBits(std::ostream &s, uint32_t bits) {
    for (uint32_t i = 0; i < 32 && bits; i++) {
        const uint32_t mask = 1u << i;
        if (bits & mask) {
            s << i;
            bits &= ~mask;  // clear the bit so the loop can terminate early once none remain
            if (bits) {
                s << ",";  // separator only between indices, never trailing
            }
        }
    }
}
// Validate draw-time state related to the PSO
// Validate, at draw-record time, that the command buffer state satisfies what the bound graphics
// pipeline requires: vertex buffer bindings and attribute alignment, dynamic viewport/scissor
// coverage, MSAA sample-count agreement with the active subpass, and renderpass/subpass
// compatibility between pipeline creation and current use. Returns true if any error was logged.
static bool ValidatePipelineDrawtimeState(layer_data const *dev_data, LAST_BOUND_STATE const &state, const GLOBAL_CB_NODE *pCB,
                                          CMD_TYPE cmd_type, PIPELINE_STATE const *pPipeline, const char *caller) {
    bool skip = false;
    // Verify vertex binding
    if (pPipeline->vertex_binding_descriptions_.size() > 0) {
        // Every binding the pipeline declares must have a buffer bound on the command buffer
        for (size_t i = 0; i < pPipeline->vertex_binding_descriptions_.size(); i++) {
            const auto vertex_binding = pPipeline->vertex_binding_descriptions_[i].binding;
            if ((pCB->current_draw_data.vertex_buffer_bindings.size() < (vertex_binding + 1)) ||
                (pCB->current_draw_data.vertex_buffer_bindings[vertex_binding].buffer == VK_NULL_HANDLE)) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_VtxIndexOutOfBounds,
                            "The Pipeline State Object (0x%" PRIx64
                            ") expects that this Command Buffer's vertex binding Index %u should be set via "
                            "vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct at "
                            "index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
                            HandleToUint64(state.pipeline_state->pipeline), vertex_binding, i, vertex_binding);
            }
        }
        // Verify vertex attribute address alignment
        for (size_t i = 0; i < pPipeline->vertex_attribute_descriptions_.size(); i++) {
            const auto &attribute_description = pPipeline->vertex_attribute_descriptions_[i];
            const auto vertex_binding = attribute_description.binding;
            const auto attribute_offset = attribute_description.offset;
            const auto attribute_format = attribute_description.format;
            const auto &vertex_binding_map_it = pPipeline->vertex_binding_to_index_map_.find(vertex_binding);
            if ((vertex_binding_map_it != pPipeline->vertex_binding_to_index_map_.cend()) &&
                (vertex_binding < pCB->current_draw_data.vertex_buffer_bindings.size()) &&
                (pCB->current_draw_data.vertex_buffer_bindings[vertex_binding].buffer != VK_NULL_HANDLE)) {
                const auto vertex_buffer_stride = pPipeline->vertex_binding_descriptions_[vertex_binding_map_it->second].stride;
                const auto vertex_buffer_offset = pCB->current_draw_data.vertex_buffer_bindings[vertex_binding].offset;
                const auto buffer_state =
                    GetBufferState(dev_data, pCB->current_draw_data.vertex_buffer_bindings[vertex_binding].buffer);
                // Use only memory binding offset as base memory should be properly aligned by the driver
                const auto buffer_binding_address = buffer_state->binding.offset + vertex_buffer_offset;
                // Use 1 as vertex/instance index to use buffer stride as well
                const auto attrib_address = buffer_binding_address + vertex_buffer_stride + attribute_offset;
                if (SafeModulo(attrib_address, FormatAlignment(attribute_format)) != 0) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                    HandleToUint64(pCB->current_draw_data.vertex_buffer_bindings[vertex_binding].buffer),
                                    kVUID_Core_DrawState_InvalidVtxAttributeAlignment,
                                    "Invalid attribAddress alignment for vertex attribute " PRINTF_SIZE_T_SPECIFIER
                                    " from "
                                    "pipeline (0x%" PRIx64 ") and vertex buffer (0x%" PRIx64 ").",
                                    i, HandleToUint64(state.pipeline_state->pipeline),
                                    HandleToUint64(pCB->current_draw_data.vertex_buffer_bindings[vertex_binding].buffer));
                }
            }
        }
    } else {
        // Pipeline declares no vertex bindings; warn (perf) if buffers were bound but never consumed
        if ((!pCB->current_draw_data.vertex_buffer_bindings.empty()) && (!pCB->vertex_buffer_used)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer),
                            kVUID_Core_DrawState_VtxIndexOutOfBounds,
                            "Vertex buffers are bound to command buffer (0x%" PRIx64
                            ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIx64 ").",
                            HandleToUint64(pCB->commandBuffer), HandleToUint64(state.pipeline_state->pipeline));
        }
    }
    // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
    // Skip check if rasterization is disabled or there is no viewport.
    if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
         (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
        pPipeline->graphicsPipelineCI.pViewportState) {
        bool dynViewport = IsDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
        bool dynScissor = IsDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
        if (dynViewport) {
            // Bitmask of viewports the PSO needs minus those actually set via vkCmdSetViewport
            const auto requiredViewportsMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
            const auto missingViewportMask = ~pCB->viewportMask & requiredViewportsMask;
            if (missingViewportMask) {
                std::stringstream ss;
                ss << "Dynamic viewport(s) ";
                ListBits(ss, missingViewportMask);
                ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewport().";
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                kVUID_Core_DrawState_ViewportScissorMismatch, "%s", ss.str().c_str());
            }
        }
        if (dynScissor) {
            // Same mask computation as viewports, for vkCmdSetScissor
            const auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
            const auto missingScissorMask = ~pCB->scissorMask & requiredScissorMask;
            if (missingScissorMask) {
                std::stringstream ss;
                ss << "Dynamic scissor(s) ";
                ListBits(ss, missingScissorMask);
                ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissor().";
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                kVUID_Core_DrawState_ViewportScissorMismatch, "%s", ss.str().c_str());
            }
        }
    }
    // Verify that any MSAA request in PSO matches sample# in bound FB
    // Skip the check if rasterization is disabled.
    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
        VkSampleCountFlagBits pso_num_samples = GetNumSamples(pPipeline);
        if (pCB->activeRenderPass) {
            const auto render_pass_info = pCB->activeRenderPass->createInfo.ptr();
            const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
            uint32_t i;
            unsigned subpass_num_samples = 0;
            // Accumulate the sample counts of all used color and depth/stencil attachments
            for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
                const auto attachment = subpass_desc->pColorAttachments[i].attachment;
                if (attachment != VK_ATTACHMENT_UNUSED)
                    subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
            }
            if (subpass_desc->pDepthStencilAttachment &&
                subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
                const auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
                subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
            }
            // PSO sample count must cover the subpass's, unless VK_AMD_mixed_attachment_samples relaxes it
            if (!dev_data->extensions.vk_amd_mixed_attachment_samples &&
                ((subpass_num_samples & static_cast<unsigned>(pso_num_samples)) != subpass_num_samples)) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                HandleToUint64(pPipeline->pipeline), kVUID_Core_DrawState_NumSamplesMismatch,
                                "Num samples mismatch! At draw-time in Pipeline (0x%" PRIx64
                                ") with %u samples while current RenderPass (0x%" PRIx64 ") w/ %u samples!",
                                HandleToUint64(pPipeline->pipeline), pso_num_samples,
                                HandleToUint64(pCB->activeRenderPass->renderPass), subpass_num_samples);
            }
        } else {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            HandleToUint64(pPipeline->pipeline), kVUID_Core_DrawState_NoActiveRenderpass,
                            "No active render pass found at draw-time in Pipeline (0x%" PRIx64 ")!",
                            HandleToUint64(pPipeline->pipeline));
        }
    }
    // Verify that PSO creation renderPass is compatible with active renderPass
    if (pCB->activeRenderPass) {
        // TODO: Move all of the error codes common across different Draws into a LUT accessed by cmd_type
        // TODO: AMD extension codes are included here, but actual function entrypoints are not yet intercepted
        // Error codes for renderpass and subpass mismatches
        auto rp_error = "VUID-vkCmdDraw-renderPass-00435", sp_error = "VUID-vkCmdDraw-subpass-00436";
        switch (cmd_type) {
            case CMD_DRAWINDEXED:
                rp_error = "VUID-vkCmdDrawIndexed-renderPass-00454";
                sp_error = "VUID-vkCmdDrawIndexed-subpass-00455";
                break;
            case CMD_DRAWINDIRECT:
                rp_error = "VUID-vkCmdDrawIndirect-renderPass-00479";
                sp_error = "VUID-vkCmdDrawIndirect-subpass-00480";
                break;
            case CMD_DRAWINDIRECTCOUNTAMD:
                rp_error = "VUID-vkCmdDrawIndirectCountAMD-renderPass-00507";
                sp_error = "VUID-vkCmdDrawIndirectCountAMD-subpass-00508";
                break;
            case CMD_DRAWINDIRECTCOUNTKHR:
                rp_error = "VUID-vkCmdDrawIndirectCountKHR-renderPass-03113";
                sp_error = "VUID-vkCmdDrawIndirectCountKHR-subpass-03114";
                break;
            case CMD_DRAWINDEXEDINDIRECT:
                rp_error = "VUID-vkCmdDrawIndexedIndirect-renderPass-00531";
                sp_error = "VUID-vkCmdDrawIndexedIndirect-subpass-00532";
                break;
            case CMD_DRAWINDEXEDINDIRECTCOUNTAMD:
                rp_error = "VUID-vkCmdDrawIndexedIndirectCountAMD-renderPass-00560";
                sp_error = "VUID-vkCmdDrawIndexedIndirectCountAMD-subpass-00561";
                break;
            case CMD_DRAWINDEXEDINDIRECTCOUNTKHR:
                rp_error = "VUID-vkCmdDrawIndexedIndirectCountKHR-renderPass-03145";
                sp_error = "VUID-vkCmdDrawIndexedIndirectCountKHR-subpass-03146";
                break;
            case CMD_DRAWMESHTASKSNV:
                rp_error = "VUID-vkCmdDrawMeshTasksNV-renderPass-02120";
                sp_error = "VUID-vkCmdDrawMeshTasksNV-subpass-02121";
                break;
            case CMD_DRAWMESHTASKSINDIRECTNV:
                rp_error = "VUID-vkCmdDrawMeshTasksIndirectNV-renderPass-02148";
                sp_error = "VUID-vkCmdDrawMeshTasksIndirectNV-subpass-02149";
                break;
            case CMD_DRAWMESHTASKSINDIRECTCOUNTNV:
                rp_error = "VUID-vkCmdDrawMeshTasksIndirectCountNV-renderPass-02184";
                sp_error = "VUID-vkCmdDrawMeshTasksIndirectCountNV-subpass-02185";
                break;
            default:
                assert(CMD_DRAW == cmd_type);
                break;
        }
        std::string err_string;
        if (pCB->activeRenderPass->renderPass != pPipeline->rp_state->renderPass) {
            // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
            skip |= ValidateRenderPassCompatibility(dev_data, "active render pass", pCB->activeRenderPass, "pipeline state object",
                                                    pPipeline->rp_state.get(), caller, rp_error);
        }
        if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) {
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        HandleToUint64(pPipeline->pipeline), sp_error, "Pipeline was built for subpass %u but used in subpass %u.",
                        pPipeline->graphicsPipelineCI.subpass, pCB->activeSubpass);
        }
    }
    return skip;
}
// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
// pipelineLayout[layoutIndex]
static bool VerifySetLayoutCompatibility(const cvdescriptorset::DescriptorSet *descriptor_set,
                                         PIPELINE_LAYOUT_NODE const *pipeline_layout, const uint32_t layoutIndex,
                                         string &errorMsg) {
    // Reject indices beyond the pipeline layout's declared set count
    auto num_sets = pipeline_layout->set_layouts.size();
    if (layoutIndex >= num_sets) {
        stringstream errorStr;
        errorStr << "VkPipelineLayout (" << pipeline_layout->layout << ") only contains " << num_sets
                 << " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index "
                 << layoutIndex;
        errorMsg = errorStr.str();
        return false;
    }
    // Push descriptor sets are exempt from layout-compatibility checking here
    if (descriptor_set->IsPushDescriptor()) return true;
    // Delegate the element-by-element layout comparison to the descriptor set itself
    auto layout_node = pipeline_layout->set_layouts[layoutIndex];
    return descriptor_set->IsCompatible(layout_node.get(), &errorMsg);
}
// Validate overall state at the time of a draw call
// Top-level draw-time validation: confirms a pipeline is bound at bind_point, checks the
// command-buffer dynamic-state flags (graphics only), verifies every descriptor set the pipeline's
// active slots require is bound, layout-compatible, and updated, and finally runs the general
// pipeline draw-time checks. Returns true if any error was logged.
static bool ValidateCmdBufDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, CMD_TYPE cmd_type, const bool indexed,
                                    const VkPipelineBindPoint bind_point, const char *function, const std::string &pipe_err_code,
                                    const std::string &state_err_code) {
    bool result = false;
    auto const &state = cb_node->lastBound[bind_point];
    PIPELINE_STATE *pPipe = state.pipeline_state;
    // Nothing else can be checked without a bound pipeline
    if (nullptr == pPipe) {
        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                       HandleToUint64(cb_node->commandBuffer), pipe_err_code,
                       "Must not call %s on this command buffer while there is no %s pipeline bound.", function,
                       bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS ? "Graphics" : "Compute");
    }
    // First check flag states
    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
        result = ValidateDrawStateFlags(dev_data, cb_node, pPipe, indexed, state_err_code);
    // Now complete other state checks
    string errorString;
    auto const &pipeline_layout = pPipe->pipeline_layout;
    for (const auto &set_binding_pair : pPipe->active_slots) {
        uint32_t setIndex = set_binding_pair.first;
        // If valid set is not bound throw an error
        if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
            result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                              HandleToUint64(cb_node->commandBuffer), kVUID_Core_DrawState_DescriptorSetNotBound,
                              "VkPipeline 0x%" PRIx64 " uses set #%u but that set is not bound.", HandleToUint64(pPipe->pipeline),
                              setIndex);
        } else if (!VerifySetLayoutCompatibility(state.boundDescriptorSets[setIndex], &pipeline_layout, setIndex, errorString)) {
            // Set is bound but not compatible w/ overlapping pipeline_layout from PSO
            VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
            result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                              HandleToUint64(setHandle), kVUID_Core_DrawState_PipelineLayoutsIncompatible,
                              "VkDescriptorSet (0x%" PRIx64
                              ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIx64 " due to: %s",
                              HandleToUint64(setHandle), setIndex, HandleToUint64(pipeline_layout.layout), errorString.c_str());
        } else {  // Valid set is bound and layout compatible, validate that it's updated
            // Pull the set node
            cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
            // Validate the draw-time state for this descriptor set
            std::string err_str;
            if (!descriptor_set->IsPushDescriptor()) {
                // For the "bindless" style resource usage with many descriptors, need to optimize command <-> descriptor
                // binding validation. Take the requested binding set and prefilter it to eliminate redundant validation checks.
                // Here, the currently bound pipeline determines whether an image validation check is redundant...
                // for images are the "req" portion of the binding_req is indirectly (but tightly) coupled to the pipeline.
                const cvdescriptorset::PrefilterBindRequestMap reduced_map(*descriptor_set, set_binding_pair.second, cb_node,
                                                                           pPipe);
                const auto &binding_req_map = reduced_map.Map();
                if (!descriptor_set->ValidateDrawState(binding_req_map, state.dynamicOffsets[setIndex], cb_node, function,
                                                       &err_str)) {
                    auto set = descriptor_set->GetSet();
                    result |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        HandleToUint64(set), kVUID_Core_DrawState_DescriptorSetNotUpdated,
                        "Descriptor set 0x%" PRIx64 " bound as set #%u encountered the following validation error at %s time: %s",
                        HandleToUint64(set), setIndex, function, err_str.c_str());
                }
            }
        }
    }
    // Check general pipeline state that needs to be validated at drawtime
    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
        result |= ValidatePipelineDrawtimeState(dev_data, state, cb_node, cmd_type, pPipe, function);
    return result;
}
// State-update counterpart to ValidateCmdBufDrawState(): binds each non-push descriptor set the
// pipeline's active slots use to the command buffer, records their storage buffer/image updates,
// and marks the vertex buffers as consumed when the pipeline declares vertex bindings.
// NOTE(review): boundDescriptorSets[setIndex] is indexed without a bounds/null check here —
// presumably validation has already reported missing sets before this runs; confirm call order.
static void UpdateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const VkPipelineBindPoint bind_point) {
    auto const &state = cb_state->lastBound[bind_point];
    PIPELINE_STATE *pPipe = state.pipeline_state;
    if (VK_NULL_HANDLE != state.pipeline_layout) {
        for (const auto &set_binding_pair : pPipe->active_slots) {
            uint32_t setIndex = set_binding_pair.first;
            // Pull the set node
            cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
            if (!descriptor_set->IsPushDescriptor()) {
                // For the "bindless" style resource usage with many descriptors, need to optimize command <-> descriptor binding
                const cvdescriptorset::PrefilterBindRequestMap reduced_map(*descriptor_set, set_binding_pair.second, cb_state);
                const auto &binding_req_map = reduced_map.Map();
                // Bind this set and its active descriptor resources to the command buffer
                descriptor_set->BindCommandBuffer(cb_state, binding_req_map);
                // For given active slots record updated images & buffers
                descriptor_set->GetStorageUpdates(binding_req_map, &cb_state->updateBuffers, &cb_state->updateImages);
            }
        }
    }
    if (!pPipe->vertex_binding_descriptions_.empty()) {
        cb_state->vertex_buffer_used = true;
    }
}
// Pipeline-creation validation that requires the layer lock (looks up other pipelines in
// dev_data->pipelineMap via GetPipelineState). Checks the derivative-pipeline rules: exactly one
// of basePipelineHandle/basePipelineIndex must be given, an index must refer to an earlier entry
// in the same create call, and the base must allow derivatives. Returns true if any error logged.
static bool ValidatePipelineLocked(layer_data *dev_data, std::vector<std::unique_ptr<PIPELINE_STATE>> const &pPipelines,
                                   int pipelineIndex) {
    bool skip = false;
    PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex].get();
    // If create derivative bit is set, check that we've specified a base
    // pipeline correctly, and that the base pipeline was created to allow
    // derivatives.
    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
        PIPELINE_STATE *pBasePipeline = nullptr;
        // XOR: exactly one of handle (non-null) or index (!= -1) must be specified
        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
            // This check is a superset of "VUID-VkGraphicsPipelineCreateInfo-flags-00724" and
            // "VUID-VkGraphicsPipelineCreateInfo-flags-00725"
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            HandleToUint64(pPipeline->pipeline), kVUID_Core_DrawState_InvalidPipelineCreateState,
                            "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                HandleToUint64(pPipeline->pipeline), "VUID-vkCreateGraphicsPipelines-flags-00720",
                                "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
            } else {
                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex].get();
            }
        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
            // Base referenced by handle: requires the locked pipelineMap lookup
            pBasePipeline = GetPipelineState(dev_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
        }
        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            HandleToUint64(pPipeline->pipeline), kVUID_Core_DrawState_InvalidPipelineCreateState,
                            "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
        }
    }
    return skip;
}
// UNLOCKED pipeline validation. DO NOT lookup objects in the layer_data->* maps in this function.
static bool ValidatePipelineUnlocked(layer_data *dev_data, std::vector<std::unique_ptr<PIPELINE_STATE>> const &pPipelines,
int pipelineIndex) {
bool skip = false;
PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex].get();
// Ensure the subpass index is valid. If not, then ValidateAndCapturePipelineShaderState
// produces nonsense errors that confuse users. Other layers should already
// emit errors for renderpass being invalid.
auto subpass_desc = &pPipeline->rp_state->createInfo.pSubpasses[pPipeline->graphicsPipelineCI.subpass];
if (pPipeline->graphicsPipelineCI.subpass >= pPipeline->rp_state->createInfo.subpassCount) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-subpass-00759",
"Invalid Pipeline CreateInfo State: Subpass index %u is out of range for this renderpass (0..%u).",
pPipeline->graphicsPipelineCI.subpass, pPipeline->rp_state->createInfo.subpassCount - 1);
subpass_desc = nullptr;
}
if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
if (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount) {
skip |= log_msg(
dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-attachmentCount-00746",
"vkCreateGraphicsPipelines(): Render pass (0x%" PRIx64
") subpass %u has colorAttachmentCount of %u which doesn't match the pColorBlendState->attachmentCount of %u.",
HandleToUint64(pPipeline->rp_state->renderPass), pPipeline->graphicsPipelineCI.subpass,
subpass_desc->colorAttachmentCount, color_blend_state->attachmentCount);
}
if (!dev_data->enabled_features.core.independentBlend) {
if (pPipeline->attachments.size() > 1) {
VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
// Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
// settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
// only attachment state, so memcmp is best suited for the comparison
if (memcmp(static_cast<const void *>(pAttachments), static_cast<const void *>(&pAttachments[i]),
sizeof(pAttachments[0]))) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, HandleToUint64(pPipeline->pipeline),
"VUID-VkPipelineColorBlendStateCreateInfo-pAttachments-00605",
"Invalid Pipeline CreateInfo: If independent blend feature not enabled, all elements of "
"pAttachments must be identical.");
break;
}
}
}
}
if (!dev_data->enabled_features.core.logicOp &&
(pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkPipelineColorBlendStateCreateInfo-logicOpEnable-00606",
"Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE.");
}
for (size_t i = 0; i < pPipeline->attachments.size(); i++) {
if ((pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
(pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
(pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
(pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
if (!dev_data->enabled_features.core.dualSrcBlend) {
skip |= log_msg(
dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkPipelineColorBlendAttachmentState-srcColorBlendFactor-00608",
"vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
"].srcColorBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
"enabled.",
pipelineIndex, i, pPipeline->attachments[i].srcColorBlendFactor);
}
}
if ((pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
(pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
(pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
(pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
if (!dev_data->enabled_features.core.dualSrcBlend) {
skip |= log_msg(
dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkPipelineColorBlendAttachmentState-dstColorBlendFactor-00609",
"vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
"].dstColorBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
"enabled.",
pipelineIndex, i, pPipeline->attachments[i].dstColorBlendFactor);
}
}
if ((pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
(pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
(pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
(pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
if (!dev_data->enabled_features.core.dualSrcBlend) {
skip |= log_msg(
dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkPipelineColorBlendAttachmentState-srcAlphaBlendFactor-00610",
"vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
"].srcAlphaBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
"enabled.",
pipelineIndex, i, pPipeline->attachments[i].srcAlphaBlendFactor);
}
}
if ((pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
(pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
(pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
(pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
if (!dev_data->enabled_features.core.dualSrcBlend) {
skip |= log_msg(
dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkPipelineColorBlendAttachmentState-dstAlphaBlendFactor-00611",
"vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
"].dstAlphaBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
"enabled.",
pipelineIndex, i, pPipeline->attachments[i].dstAlphaBlendFactor);
}
}
}
}
if (ValidateAndCapturePipelineShaderState(dev_data, pPipeline)) {
skip = true;
}
// Each shader's stage must be unique
if (pPipeline->duplicate_shaders) {
for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
if (pPipeline->duplicate_shaders & stage) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), kVUID_Core_DrawState_InvalidPipelineCreateState,
"Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
}
}
}
if (dev_data->extensions.vk_nv_mesh_shader) {
// VS or mesh is required
if (!(pPipeline->active_shaders & (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_MESH_BIT_NV))) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-stage-02096",
"Invalid Pipeline CreateInfo State: Vertex Shader or Mesh Shader required.");
}
// Can't mix mesh and VTG
if ((pPipeline->active_shaders & (VK_SHADER_STAGE_MESH_BIT_NV | VK_SHADER_STAGE_TASK_BIT_NV)) &&
(pPipeline->active_shaders & (VK_SHADER_STAGE_VERTEX_BIT |
VK_SHADER_STAGE_GEOMETRY_BIT |
VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT |
VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT))) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-pStages-02095",
"Invalid Pipeline CreateInfo State: Geometric shader stages must either be all mesh (mesh | task) "
"or all VTG (vertex, tess control, tess eval, geom).");
}
} else {
// VS is required
if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-stage-00727",
"Invalid Pipeline CreateInfo State: Vertex Shader required.");
}
}
if (!dev_data->enabled_features.mesh_shader.meshShader && (pPipeline->active_shaders & VK_SHADER_STAGE_MESH_BIT_NV)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkPipelineShaderStageCreateInfo-stage-02091",
"Invalid Pipeline CreateInfo State: Mesh Shader not supported.");
}
if (!dev_data->enabled_features.mesh_shader.taskShader && (pPipeline->active_shaders & VK_SHADER_STAGE_TASK_BIT_NV)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkPipelineShaderStageCreateInfo-stage-02092",
"Invalid Pipeline CreateInfo State: Task Shader not supported.");
}
// Either both or neither TC/TE shaders should be defined
bool has_control = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) != 0;
bool has_eval = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) != 0;
if (has_control && !has_eval) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-pStages-00729",
"Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair.");
}
if (!has_control && has_eval) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-pStages-00730",
"Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair.");
}
// Compute shaders should be specified independent of Gfx shaders
if (pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-stage-00728",
"Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline.");
}
if ((pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT) && !pPipeline->graphicsPipelineCI.pInputAssemblyState) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline),
"VUID-VkGraphicsPipelineCreateInfo-pStages-02098",
"Invalid Pipeline CreateInfo State: Missing pInputAssemblyState.");
}
// VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
// Mismatching primitive topology and tessellation fails graphics pipeline creation.
if (has_control && has_eval &&
(!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-pStages-00736",
"Invalid Pipeline CreateInfo State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA topology for "
"tessellation pipelines.");
}
if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
if (!has_control || !has_eval) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-topology-00737",
"Invalid Pipeline CreateInfo State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid "
"for tessellation pipelines.");
}
}
// If a rasterization state is provided...
if (pPipeline->graphicsPipelineCI.pRasterizationState) {
if ((pPipeline->graphicsPipelineCI.pRasterizationState->depthClampEnable == VK_TRUE) &&
(!dev_data->enabled_features.core.depthClamp)) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkPipelineRasterizationStateCreateInfo-depthClampEnable-00782",
"vkCreateGraphicsPipelines(): the depthClamp device feature is disabled: the depthClampEnable member "
"of the VkPipelineRasterizationStateCreateInfo structure must be set to VK_FALSE.");
}
if (!IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BIAS) &&
(pPipeline->graphicsPipelineCI.pRasterizationState->depthBiasClamp != 0.0) &&
(!dev_data->enabled_features.core.depthBiasClamp)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), kVUID_Core_DrawState_InvalidFeature,
"vkCreateGraphicsPipelines(): the depthBiasClamp device feature is disabled: the depthBiasClamp member "
"of the VkPipelineRasterizationStateCreateInfo structure must be set to 0.0 unless the "
"VK_DYNAMIC_STATE_DEPTH_BIAS dynamic state is enabled");
}
// If rasterization is enabled...
if (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) {
if ((pPipeline->graphicsPipelineCI.pMultisampleState->alphaToOneEnable == VK_TRUE) &&
(!dev_data->enabled_features.core.alphaToOne)) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkPipelineMultisampleStateCreateInfo-alphaToOneEnable-00785",
"vkCreateGraphicsPipelines(): the alphaToOne device feature is disabled: the alphaToOneEnable "
"member of the VkPipelineMultisampleStateCreateInfo structure must be set to VK_FALSE.");
}
// If subpass uses a depth/stencil attachment, pDepthStencilState must be a pointer to a valid structure
if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
if (!pPipeline->graphicsPipelineCI.pDepthStencilState) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline),
"VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00752",
"Invalid Pipeline CreateInfo State: pDepthStencilState is NULL when rasterization is enabled "
"and subpass uses a depth/stencil attachment.");
} else if ((pPipeline->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) &&
(!dev_data->enabled_features.core.depthBounds)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline),
"VUID-VkPipelineDepthStencilStateCreateInfo-depthBoundsTestEnable-00598",
"vkCreateGraphicsPipelines(): the depthBounds device feature is disabled: the "
"depthBoundsTestEnable member of the VkPipelineDepthStencilStateCreateInfo structure must be "
"set to VK_FALSE.");
}
}
// If subpass uses color attachments, pColorBlendState must be valid pointer
if (subpass_desc) {
uint32_t color_attachment_count = 0;
for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
++color_attachment_count;
}
}
if (color_attachment_count > 0 && pPipeline->graphicsPipelineCI.pColorBlendState == nullptr) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline),
"VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00753",
"Invalid Pipeline CreateInfo State: pColorBlendState is NULL when rasterization is enabled and "
"subpass uses color attachments.");
}
}
}
}
if ((pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT) && !pPipeline->graphicsPipelineCI.pVertexInputState) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline),
"VUID-VkGraphicsPipelineCreateInfo-pStages-02097",
"Invalid Pipeline CreateInfo State: Missing pVertexInputState.");
}
auto vi = pPipeline->graphicsPipelineCI.pVertexInputState;
if (vi != NULL) {
for (uint32_t j = 0; j < vi->vertexAttributeDescriptionCount; j++) {
VkFormat format = vi->pVertexAttributeDescriptions[j].format;
// Internal call to get format info. Still goes through layers, could potentially go directly to ICD.
VkFormatProperties properties;
dev_data->instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(dev_data->physical_device, format,
&properties);
if ((properties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkVertexInputAttributeDescription-format-00623",
"vkCreateGraphicsPipelines: pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptions[%d].format "
"(%s) is not a supported vertex buffer format.",
pipelineIndex, j, string_VkFormat(format));
}
}
}
if (dev_data->extensions.vk_amd_mixed_attachment_samples) {
VkSampleCountFlagBits max_sample_count = static_cast<VkSampleCountFlagBits>(0);
for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
max_sample_count =
std::max(max_sample_count,
pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pColorAttachments[i].attachment].samples);
}
}
if (subpass_desc->pDepthStencilAttachment && subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
max_sample_count =
std::max(max_sample_count,
pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pDepthStencilAttachment->attachment].samples);
}
if ((pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) &&
(pPipeline->graphicsPipelineCI.pMultisampleState->rasterizationSamples != max_sample_count)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-subpass-01505",
"vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%s) != max "
"attachment samples (%s) used in subpass %u.",
pipelineIndex,
string_VkSampleCountFlagBits(pPipeline->graphicsPipelineCI.pMultisampleState->rasterizationSamples),
string_VkSampleCountFlagBits(max_sample_count), pPipeline->graphicsPipelineCI.subpass);
}
}
return skip;
}
// Block of code at start here specifically for managing/tracking DSs
// Look up the descriptor pool state for the given pool handle; returns NULL for an unknown handle.
DESCRIPTOR_POOL_STATE *GetDescriptorPoolState(const layer_data *dev_data, const VkDescriptorPool pool) {
    const auto iter = dev_data->descriptorPoolMap.find(pool);
    return (iter == dev_data->descriptorPoolMap.end()) ? NULL : iter->second;
}
// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
// func_str is the name of the calling function (used only for the error message text)
// Return false if no errors occur
// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
// Note: func_str is now taken by const reference to avoid an unnecessary std::string copy on every call;
// all existing call sites remain source-compatible.
static bool ValidateIdleDescriptorSet(const layer_data *dev_data, VkDescriptorSet set, const std::string &func_str) {
    // This check can be disabled through instance-level validation settings.
    if (dev_data->instance_data->disabled.idle_descriptor_set) return false;
    bool skip = false;
    auto set_node = dev_data->setMap.find(set);
    if (set_node == dev_data->setMap.end()) {
        // Unknown handle: the set was never allocated (or was already destroyed).
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        HandleToUint64(set), kVUID_Core_DrawState_DoubleDestroy,
                        "Cannot call %s() on descriptor set 0x%" PRIx64 " that has not been allocated.", func_str.c_str(),
                        HandleToUint64(set));
    } else {
        // TODO : This covers various error cases so should pass error enum into this function and use passed in enum here
        if (set_node->second->in_use.load()) {
            // The set is still referenced by a command buffer that has not completed execution.
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            HandleToUint64(set), "VUID-vkFreeDescriptorSets-pDescriptorSets-00309",
                            "Cannot call %s() on descriptor set 0x%" PRIx64 " that is in use by a command buffer.",
                            func_str.c_str(), HandleToUint64(set));
        }
    }
    return skip;
}
// Remove the set's handle from the device's setMap, then free the tracking object itself.
static void FreeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
    const auto set_handle = descriptor_set->GetSet();
    dev_data->setMap.erase(set_handle);
    delete descriptor_set;
}
// Free all DS Pools including their Sets & related sub-structs
// NOTE : Calls to this function should be wrapped in mutex
static void DeletePools(layer_data *dev_data) {
    auto iter = dev_data->descriptorPoolMap.begin();
    while (iter != dev_data->descriptorPoolMap.end()) {
        DESCRIPTOR_POOL_STATE *pool_state = iter->second;
        // Release every set owned by this pool (also removes each from setMap)
        for (auto ds : pool_state->sets) {
            FreeDescriptorSet(dev_data, ds);
        }
        pool_state->sets.clear();
        delete pool_state;
        // erase() returns the iterator following the removed entry
        iter = dev_data->descriptorPoolMap.erase(iter);
    }
}
// For given CB object, fetch associated CB Node from map; returns NULL for an unknown handle.
GLOBAL_CB_NODE *GetCBNode(layer_data const *dev_data, const VkCommandBuffer cb) {
    const auto iter = dev_data->commandBufferMap.find(cb);
    return (iter == dev_data->commandBufferMap.end()) ? NULL : iter->second;
}
// If a renderpass is active, verify that the given command type is appropriate for current subpass state
bool ValidateCmdSubpassState(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
    if (!pCB->activeRenderPass) return false;
    bool skip = false;
    const bool secondary_contents = (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
    const bool inline_contents = (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE);
    // Commands that transition subpass/renderpass state are always permitted
    const bool is_transition_cmd =
        (cmd_type == CMD_EXECUTECOMMANDS || cmd_type == CMD_NEXTSUBPASS || cmd_type == CMD_ENDRENDERPASS);
    if (secondary_contents && !is_transition_cmd) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidCommandBuffer,
                        "Commands cannot be called in a subpass using secondary command buffers.");
    } else if (inline_contents && cmd_type == CMD_EXECUTECOMMANDS) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidCommandBuffer,
                        "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
    }
    return skip;
}
// Validate that the command buffer was allocated from a pool whose queue family supports at least one of
// required_flags. Logs error_code and returns true when the pool's queue family has none of the required
// capabilities; returns false otherwise (including when the pool cannot be looked up).
bool ValidateCmdQueueFlags(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *caller_name,
                           VkQueueFlags required_flags, const std::string &error_code) {
    auto pool = GetCommandPoolNode(dev_data, cb_node->createInfo.commandPool);
    if (pool) {
        VkQueueFlags queue_flags = dev_data->phys_dev_properties.queue_family_properties[pool->queueFamilyIndex].queueFlags;
        if (!(required_flags & queue_flags)) {
            // Build a human-readable "X or Y" list of the required capabilities for the error message
            string required_flags_string;
            for (auto flag : {VK_QUEUE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT}) {
                if (flag & required_flags) {
                    if (!required_flags_string.empty()) {
                        required_flags_string += " or ";
                    }
                    required_flags_string += string_VkQueueFlagBits(flag);
                }
            }
            // Fixed message typo: was "capabilities.." (double period)
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                           HandleToUint64(cb_node->commandBuffer), error_code,
                           "Cannot call %s on a command buffer allocated from a pool without %s capabilities.", caller_name,
                           required_flags_string.c_str());
        }
    }
    return false;
}
// Describe why a bound object of the given type invalidates a command buffer (used in error messages).
static char const *GetCauseStr(VK_OBJECT obj) {
    switch (obj.type) {
        case kVulkanObjectTypeDescriptorSet:
            return "destroyed or updated";
        case kVulkanObjectTypeCommandBuffer:
            return "destroyed or rerecorded";
        default:
            return "destroyed";
    }
}
// Emit one error per broken binding explaining why recording into this command buffer is invalid.
static bool ReportInvalidCommandBuffer(layer_data *dev_data, const GLOBAL_CB_NODE *cb_state, const char *call_source) {
    bool skip = false;
    for (const auto &obj : cb_state->broken_bindings) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(cb_state->commandBuffer), kVUID_Core_DrawState_InvalidCommandBuffer,
                        "You are adding %s to command buffer 0x%" PRIx64 " that is invalid because bound %s 0x%" PRIx64 " was %s.",
                        call_source, HandleToUint64(cb_state->commandBuffer), object_string[obj.type], obj.handle,
                        GetCauseStr(obj));
    }
    return skip;
}
// 'commandBuffer must be in the recording state' valid usage error code for each command
// Note: grepping for ^^^^^^^^^ in vk_validation_database is easily massaged into the following list
// Note: C++11 doesn't automatically devolve enum types to the underlying type for hash traits purposes (fixed in C++14)
using CmdTypeHashType = std::underlying_type<CMD_TYPE>::type;
// Lookup table mapping each CMD_TYPE to the VUID string reported when that command is recorded while the
// command buffer is not in the recording state. CMD_NONE maps to kVUIDUndefined and is used by ValidateCmd
// as the fallback entry when a command is missing from this table.
static const std::unordered_map<CmdTypeHashType, std::string> must_be_recording_map = {
    {CMD_NONE, kVUIDUndefined}, // UNMATCHED
    {CMD_BEGINQUERY, "VUID-vkCmdBeginQuery-commandBuffer-recording"},
    {CMD_BEGINRENDERPASS, "VUID-vkCmdBeginRenderPass-commandBuffer-recording"},
    {CMD_BINDDESCRIPTORSETS, "VUID-vkCmdBindDescriptorSets-commandBuffer-recording"},
    {CMD_BINDINDEXBUFFER, "VUID-vkCmdBindIndexBuffer-commandBuffer-recording"},
    {CMD_BINDPIPELINE, "VUID-vkCmdBindPipeline-commandBuffer-recording"},
    {CMD_BINDSHADINGRATEIMAGE, "VUID-vkCmdBindShadingRateImageNV-commandBuffer-recording"},
    {CMD_BINDVERTEXBUFFERS, "VUID-vkCmdBindVertexBuffers-commandBuffer-recording"},
    {CMD_BLITIMAGE, "VUID-vkCmdBlitImage-commandBuffer-recording"},
    {CMD_CLEARATTACHMENTS, "VUID-vkCmdClearAttachments-commandBuffer-recording"},
    {CMD_CLEARCOLORIMAGE, "VUID-vkCmdClearColorImage-commandBuffer-recording"},
    {CMD_CLEARDEPTHSTENCILIMAGE, "VUID-vkCmdClearDepthStencilImage-commandBuffer-recording"},
    {CMD_COPYBUFFER, "VUID-vkCmdCopyBuffer-commandBuffer-recording"},
    {CMD_COPYBUFFERTOIMAGE, "VUID-vkCmdCopyBufferToImage-commandBuffer-recording"},
    {CMD_COPYIMAGE, "VUID-vkCmdCopyImage-commandBuffer-recording"},
    {CMD_COPYIMAGETOBUFFER, "VUID-vkCmdCopyImageToBuffer-commandBuffer-recording"},
    {CMD_COPYQUERYPOOLRESULTS, "VUID-vkCmdCopyQueryPoolResults-commandBuffer-recording"},
    {CMD_DEBUGMARKERBEGINEXT, "VUID-vkCmdDebugMarkerBeginEXT-commandBuffer-recording"},
    {CMD_DEBUGMARKERENDEXT, "VUID-vkCmdDebugMarkerEndEXT-commandBuffer-recording"},
    {CMD_DEBUGMARKERINSERTEXT, "VUID-vkCmdDebugMarkerInsertEXT-commandBuffer-recording"},
    {CMD_DISPATCH, "VUID-vkCmdDispatch-commandBuffer-recording"},
    // Exclude KHX (if not already present) { CMD_DISPATCHBASEKHX, "VUID-vkCmdDispatchBase-commandBuffer-recording" },
    {CMD_DISPATCHINDIRECT, "VUID-vkCmdDispatchIndirect-commandBuffer-recording"},
    {CMD_DRAW, "VUID-vkCmdDraw-commandBuffer-recording"},
    {CMD_DRAWINDEXED, "VUID-vkCmdDrawIndexed-commandBuffer-recording"},
    {CMD_DRAWINDEXEDINDIRECT, "VUID-vkCmdDrawIndexedIndirect-commandBuffer-recording"},
    // Exclude vendor ext (if not already present) { CMD_DRAWINDEXEDINDIRECTCOUNTAMD,
    // "VUID-vkCmdDrawIndexedIndirectCountAMD-commandBuffer-recording" },
    {CMD_DRAWINDEXEDINDIRECTCOUNTKHR, "VUID-vkCmdDrawIndexedIndirectCountKHR-commandBuffer-recording"},
    {CMD_DRAWINDIRECT, "VUID-vkCmdDrawIndirect-commandBuffer-recording"},
    // Exclude vendor ext (if not already present) { CMD_DRAWINDIRECTCOUNTAMD,
    // "VUID-vkCmdDrawIndirectCountAMD-commandBuffer-recording" },
    {CMD_DRAWINDIRECTCOUNTKHR, "VUID-vkCmdDrawIndirectCountKHR-commandBuffer-recording"},
    {CMD_DRAWMESHTASKSNV, "VUID-vkCmdDrawMeshTasksNV-commandBuffer-recording"},
    {CMD_DRAWMESHTASKSINDIRECTNV, "VUID-vkCmdDrawMeshTasksIndirectNV-commandBuffer-recording"},
    {CMD_DRAWMESHTASKSINDIRECTCOUNTNV, "VUID-vkCmdDrawMeshTasksIndirectCountNV-commandBuffer-recording"},
    {CMD_ENDCOMMANDBUFFER, "VUID-vkEndCommandBuffer-commandBuffer-00059"},
    {CMD_ENDQUERY, "VUID-vkCmdEndQuery-commandBuffer-recording"},
    {CMD_ENDRENDERPASS, "VUID-vkCmdEndRenderPass-commandBuffer-recording"},
    {CMD_EXECUTECOMMANDS, "VUID-vkCmdExecuteCommands-commandBuffer-recording"},
    {CMD_FILLBUFFER, "VUID-vkCmdFillBuffer-commandBuffer-recording"},
    {CMD_NEXTSUBPASS, "VUID-vkCmdNextSubpass-commandBuffer-recording"},
    {CMD_PIPELINEBARRIER, "VUID-vkCmdPipelineBarrier-commandBuffer-recording"},
    // Exclude vendor ext (if not already present) { CMD_PROCESSCOMMANDSNVX, "VUID-vkCmdProcessCommandsNVX-commandBuffer-recording"
    // },
    {CMD_PUSHCONSTANTS, "VUID-vkCmdPushConstants-commandBuffer-recording"},
    {CMD_PUSHDESCRIPTORSETKHR, "VUID-vkCmdPushDescriptorSetKHR-commandBuffer-recording"},
    {CMD_PUSHDESCRIPTORSETWITHTEMPLATEKHR, "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-recording"},
    // Exclude vendor ext (if not already present) { CMD_RESERVESPACEFORCOMMANDSNVX,
    // "VUID-vkCmdReserveSpaceForCommandsNVX-commandBuffer-recording" },
    {CMD_RESETEVENT, "VUID-vkCmdResetEvent-commandBuffer-recording"},
    {CMD_RESETQUERYPOOL, "VUID-vkCmdResetQueryPool-commandBuffer-recording"},
    {CMD_RESOLVEIMAGE, "VUID-vkCmdResolveImage-commandBuffer-recording"},
    {CMD_SETBLENDCONSTANTS, "VUID-vkCmdSetBlendConstants-commandBuffer-recording"},
    {CMD_SETDEPTHBIAS, "VUID-vkCmdSetDepthBias-commandBuffer-recording"},
    {CMD_SETDEPTHBOUNDS, "VUID-vkCmdSetDepthBounds-commandBuffer-recording"},
    // Exclude KHX (if not already present) { CMD_SETDEVICEMASKKHX, "VUID-vkCmdSetDeviceMask-commandBuffer-recording" },
    {CMD_SETDISCARDRECTANGLEEXT, "VUID-vkCmdSetDiscardRectangleEXT-commandBuffer-recording"},
    {CMD_SETEVENT, "VUID-vkCmdSetEvent-commandBuffer-recording"},
    {CMD_SETEXCLUSIVESCISSOR, "VUID-vkCmdSetExclusiveScissorNV-commandBuffer-recording"},
    {CMD_SETLINEWIDTH, "VUID-vkCmdSetLineWidth-commandBuffer-recording"},
    {CMD_SETSAMPLELOCATIONSEXT, "VUID-vkCmdSetSampleLocationsEXT-commandBuffer-recording"},
    {CMD_SETSCISSOR, "VUID-vkCmdSetScissor-commandBuffer-recording"},
    {CMD_SETSTENCILCOMPAREMASK, "VUID-vkCmdSetStencilCompareMask-commandBuffer-recording"},
    {CMD_SETSTENCILREFERENCE, "VUID-vkCmdSetStencilReference-commandBuffer-recording"},
    {CMD_SETSTENCILWRITEMASK, "VUID-vkCmdSetStencilWriteMask-commandBuffer-recording"},
    {CMD_SETVIEWPORT, "VUID-vkCmdSetViewport-commandBuffer-recording"},
    {CMD_SETVIEWPORTSHADINGRATEPALETTE, "VUID-vkCmdSetViewportShadingRatePaletteNV-commandBuffer-recording"},
    // Exclude vendor ext (if not already present) { CMD_SETVIEWPORTWSCALINGNV,
    // "VUID-vkCmdSetViewportWScalingNV-commandBuffer-recording" },
    {CMD_UPDATEBUFFER, "VUID-vkCmdUpdateBuffer-commandBuffer-recording"},
    {CMD_WAITEVENTS, "VUID-vkCmdWaitEvents-commandBuffer-recording"},
    {CMD_WRITETIMESTAMP, "VUID-vkCmdWriteTimestamp-commandBuffer-recording"},
};
// Validate the given command being added to the specified cmd buffer, flagging errors if CB is not in the recording state or if
// there's an issue with the Cmd ordering
bool ValidateCmd(layer_data *dev_data, const GLOBAL_CB_NODE *cb_state, const CMD_TYPE cmd, const char *caller_name) {
    // Recording: only subpass-appropriateness can be wrong
    if (cb_state->state == CB_RECORDING) {
        return ValidateCmdSubpassState(dev_data, cb_state, cmd);
    }
    // Invalidated buffers: report why each bound object broke the buffer
    if ((cb_state->state == CB_INVALID_COMPLETE) || (cb_state->state == CB_INVALID_INCOMPLETE)) {
        return ReportInvalidCommandBuffer(dev_data, cb_state, caller_name);
    }
    // Any other state means the command was issued outside vkBeginCommandBuffer()
    auto vuid_it = must_be_recording_map.find(cmd);
    // This assert lets us know that a vkCmd.* entrypoint has been added without enabling it in the map
    assert(vuid_it != must_be_recording_map.cend());
    if (vuid_it == must_be_recording_map.cend()) {
        vuid_it = must_be_recording_map.find(CMD_NONE);  // But we'll handle the asserting case, in case of a test gap
    }
    const auto error = vuid_it->second;
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                   HandleToUint64(cb_state->commandBuffer), error,
                   "You must call vkBeginCommandBuffer() before this call to %s.", caller_name);
}
// For given object struct return a ptr of BASE_NODE type for its wrapping struct.
// Returns nullptr for unhandled object types (asserts in debug builds).
BASE_NODE *GetStateStructPtrFromObject(layer_data *dev_data, VK_OBJECT object_struct) {
    switch (object_struct.type) {
        case kVulkanObjectTypeDescriptorSet:
            return GetSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(object_struct.handle));
        case kVulkanObjectTypeSampler:
            return GetSamplerState(dev_data, reinterpret_cast<VkSampler &>(object_struct.handle));
        case kVulkanObjectTypeQueryPool:
            return GetQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(object_struct.handle));
        case kVulkanObjectTypePipeline:
            return GetPipelineState(dev_data, reinterpret_cast<VkPipeline &>(object_struct.handle));
        case kVulkanObjectTypeBuffer:
            return GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(object_struct.handle));
        case kVulkanObjectTypeBufferView:
            return GetBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(object_struct.handle));
        case kVulkanObjectTypeImage:
            return GetImageState(dev_data, reinterpret_cast<VkImage &>(object_struct.handle));
        case kVulkanObjectTypeImageView:
            return GetImageViewState(dev_data, reinterpret_cast<VkImageView &>(object_struct.handle));
        case kVulkanObjectTypeEvent:
            return GetEventNode(dev_data, reinterpret_cast<VkEvent &>(object_struct.handle));
        case kVulkanObjectTypeDescriptorPool:
            return GetDescriptorPoolState(dev_data, reinterpret_cast<VkDescriptorPool &>(object_struct.handle));
        case kVulkanObjectTypeCommandPool:
            return GetCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(object_struct.handle));
        case kVulkanObjectTypeFramebuffer:
            return GetFramebufferState(dev_data, reinterpret_cast<VkFramebuffer &>(object_struct.handle));
        case kVulkanObjectTypeRenderPass:
            return GetRenderPassState(dev_data, reinterpret_cast<VkRenderPass &>(object_struct.handle));
        case kVulkanObjectTypeDeviceMemory:
            return GetMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(object_struct.handle));
        default:
            // TODO : Any other objects to be handled here?
            assert(0);
            return nullptr;
    }
}
// Tie the VK_OBJECT to the cmd buffer which includes:
//  Add object_binding to cmd buffer
//  Add cb_binding to object
static void AddCommandBufferBinding(std::unordered_set<GLOBAL_CB_NODE *> *cb_bindings, VK_OBJECT obj, GLOBAL_CB_NODE *cb_node) {
    // Record the association in both directions so either side can find the other.
    cb_node->object_bindings.insert(obj);  // cmd buffer -> object
    cb_bindings->insert(cb_node);          // object -> cmd buffer
}
// For a given object, if cb_node is in that objects cb_bindings, remove cb_node
static void RemoveCommandBufferBinding(layer_data *dev_data, VK_OBJECT const *object, GLOBAL_CB_NODE *cb_node) {
    // Look up the state struct tracking this object; null means there is nothing to unbind.
    BASE_NODE *state = GetStateStructPtrFromObject(dev_data, *object);
    if (state != nullptr) {
        state->cb_bindings.erase(cb_node);
    }
}
// Reset the command buffer state
// Maintain the createInfo and set state to CB_NEW, but clear all other state
// NOTE(review): statement order matters throughout -- e.g. linked secondary
// command buffers are invalidated before the reverse links are severed, and
// object bindings are walked before being cleared -- so the body is left as-is.
static void ResetCommandBufferState(layer_data *dev_data, const VkCommandBuffer cb) {
    GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
    if (pCB) {
        pCB->in_use.store(0);
        // Reset CB state (note that createInfo is not cleared)
        pCB->commandBuffer = cb;
        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
        pCB->hasDrawCmd = false;
        pCB->state = CB_NEW;
        pCB->submitCount = 0;
        pCB->image_layout_change_count = 1;  // Start at 1. 0 is insert value for validation cache versions, s.t. new == dirty
        // Dynamic-state tracking masks
        pCB->status = 0;
        pCB->static_status = 0;
        pCB->viewportMask = 0;
        pCB->scissorMask = 0;
        // Clear per-bind-point bound pipeline/descriptor state
        for (auto &item : pCB->lastBound) {
            item.second.reset();
        }
        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
        pCB->activeRenderPass = nullptr;
        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
        pCB->activeSubpass = 0;
        pCB->broken_bindings.clear();
        pCB->waitedEvents.clear();
        pCB->events.clear();
        pCB->writeEventsBeforeWait.clear();
        pCB->waitedEventsBeforeQueryReset.clear();
        pCB->queryToStateMap.clear();
        pCB->activeQueries.clear();
        pCB->startedQueries.clear();
        pCB->imageLayoutMap.clear();
        pCB->eventToStageMap.clear();
        pCB->draw_data.clear();
        pCB->current_draw_data.vertex_buffer_bindings.clear();
        pCB->vertex_buffer_used = false;
        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
        // If secondary, invalidate any primary command buffer that may call us.
        if (pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
            InvalidateCommandBuffers(dev_data, pCB->linkedCommandBuffers, {HandleToUint64(cb), kVulkanObjectTypeCommandBuffer});
        }
        // Remove reverse command buffer links.
        for (auto pSubCB : pCB->linkedCommandBuffers) {
            pSubCB->linkedCommandBuffers.erase(pCB);
        }
        pCB->linkedCommandBuffers.clear();
        pCB->updateImages.clear();
        pCB->updateBuffers.clear();
        ClearCmdBufAndMemReferences(dev_data, pCB);
        pCB->queue_submit_functions.clear();
        pCB->cmd_execute_commands_functions.clear();
        pCB->eventUpdates.clear();
        pCB->queryUpdates.clear();
        // Remove object bindings
        for (auto obj : pCB->object_bindings) {
            RemoveCommandBufferBinding(dev_data, &obj, pCB);
        }
        pCB->object_bindings.clear();
        // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
        for (auto framebuffer : pCB->framebuffers) {
            auto fb_state = GetFramebufferState(dev_data, framebuffer);
            if (fb_state) fb_state->cb_bindings.erase(pCB);
        }
        pCB->framebuffers.clear();
        pCB->activeFramebuffer = VK_NULL_HANDLE;
        memset(&pCB->index_buffer_binding, 0, sizeof(pCB->index_buffer_binding));
        // Reset queue-family-ownership-transfer barrier tracking
        pCB->qfo_transfer_image_barriers.Reset();
        pCB->qfo_transfer_buffer_barriers.Reset();
    }
}
// Build the mask of pipeline state that is STATIC for this pipeline: start from
// "everything is static" and clear the bit for each declared dynamic state.
CBStatusFlags MakeStaticStateMask(VkPipelineDynamicStateCreateInfo const *ds) {
    CBStatusFlags static_mask = CBSTATUS_ALL_STATE_SET;
    if (ds == nullptr) {
        return static_mask;
    }
    for (uint32_t idx = 0; idx < ds->dynamicStateCount; ++idx) {
        // Map the dynamic-state enum to the status bit it invalidates (0 if untracked).
        CBStatusFlags dynamic_bit = 0;
        switch (ds->pDynamicStates[idx]) {
            case VK_DYNAMIC_STATE_LINE_WIDTH:
                dynamic_bit = CBSTATUS_LINE_WIDTH_SET;
                break;
            case VK_DYNAMIC_STATE_DEPTH_BIAS:
                dynamic_bit = CBSTATUS_DEPTH_BIAS_SET;
                break;
            case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
                dynamic_bit = CBSTATUS_BLEND_CONSTANTS_SET;
                break;
            case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
                dynamic_bit = CBSTATUS_DEPTH_BOUNDS_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
                dynamic_bit = CBSTATUS_STENCIL_READ_MASK_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
                dynamic_bit = CBSTATUS_STENCIL_WRITE_MASK_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
                dynamic_bit = CBSTATUS_STENCIL_REFERENCE_SET;
                break;
            case VK_DYNAMIC_STATE_SCISSOR:
                dynamic_bit = CBSTATUS_SCISSOR_SET;
                break;
            case VK_DYNAMIC_STATE_VIEWPORT:
                dynamic_bit = CBSTATUS_VIEWPORT_SET;
                break;
            case VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV:
                dynamic_bit = CBSTATUS_EXCLUSIVE_SCISSOR_SET;
                break;
            case VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV:
                dynamic_bit = CBSTATUS_SHADING_RATE_PALETTE_SET;
                break;
            default:
                break;  // Unrecognized/untracked dynamic state leaves the mask untouched
        }
        static_mask &= ~dynamic_bit;
    }
    return static_mask;
}
// Flags validation error if the associated call is made inside a render pass. The apiName routine should ONLY be called outside a
// render pass.
bool InsideRenderPass(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const char *apiName, const std::string &msgCode) {
    // Legal case: no render pass currently active on this command buffer.
    if (!pCB->activeRenderPass) {
        return false;
    }
    // log_msg's return value indicates whether the error was actually flagged.
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                   HandleToUint64(pCB->commandBuffer), msgCode,
                   "%s: It is invalid to issue this call inside an active render pass (0x%" PRIx64 ").", apiName,
                   HandleToUint64(pCB->activeRenderPass->renderPass));
}
// Flags validation error if the associated call is made outside a render pass. The apiName
// routine should ONLY be called inside a render pass.
bool OutsideRenderPass(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, const char *apiName, const std::string &msgCode) {
    const bool is_primary = (pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
    const bool is_secondary = (pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
    const bool continues_render_pass = (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) != 0;
    bool outside = false;
    // A primary CB must have an active render pass; a secondary CB may instead have been
    // begun with RENDER_PASS_CONTINUE_BIT (it inherits the caller's render pass).
    if (!pCB->activeRenderPass && (is_primary || (is_secondary && !continues_render_pass))) {
        outside = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                          HandleToUint64(pCB->commandBuffer), msgCode, "%s: This call must be issued inside an active render pass.",
                          apiName);
    }
    return outside;
}
// Register this layer's debug-report and debug-messenger logging actions,
// passing the "lunarg_core_validation" identifier to both helpers.
static void InitCoreValidation(instance_layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
    layer_debug_report_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
    layer_debug_messenger_actions(instance_data->report_data, instance_data->logging_messenger, pAllocator,
                                  "lunarg_core_validation");
}
// For the given ValidationCheck enum, set all relevant instance disabled flags to true
void SetDisabledFlags(instance_layer_data *instance_data, const VkValidationFlagsEXT *val_flags_struct) {
    // Walk the application-provided list of validation checks to disable.
    for (uint32_t idx = 0; idx < val_flags_struct->disabledValidationCheckCount; ++idx) {
        const auto check = val_flags_struct->pDisabledValidationChecks[idx];
        if (check == VK_VALIDATION_CHECK_SHADERS_EXT) {
            instance_data->disabled.shader_validation = true;
        } else if (check == VK_VALIDATION_CHECK_ALL_EXT) {
            // Set all disabled flags to true
            instance_data->disabled.SetAll(true);
        }
        // Any other value is ignored.
    }
}
// Step past this layer's link in the instance create chain so the next layer
// (or the loader terminator) sees its own entry when we call down.
static void PreCallRecordCreateInstance(VkLayerInstanceCreateInfo *chain_info) {
    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
}
// Post-create validation: delegates layer-ordering checks on the create info.
static void PostCallValidateCreateInstance(const VkInstanceCreateInfo *pCreateInfo) { ValidateLayerOrdering(*pCreateInfo); }
// After instance creation, honor any VkValidationFlagsEXT the application
// supplied on the pNext chain by disabling the requested checks.
static void PostCallRecordCreateInstance(instance_layer_data *instance_data, const VkInstanceCreateInfo *pCreateInfo) {
    const auto *validation_flags = lvl_find_in_chain<VkValidationFlagsEXT>(pCreateInfo->pNext);
    if (validation_flags != nullptr) {
        SetDisabledFlags(instance_data, validation_flags);
    }
}
// Layer intercept for vkCreateInstance: resolves the next layer's entry point
// from the chain info, calls down the chain, then initializes this layer's
// per-instance state (dispatch table, report data, extension/API versions).
// NOTE(review): the chain must be advanced (PreCallRecordCreateInstance) before
// fpCreateInstance is invoked; order here is load-bearing.
VKAPI_ATTR VkResult VKAPI_CALL CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                                              VkInstance *pInstance) {
    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
    if (fpCreateInstance == NULL) return VK_ERROR_INITIALIZATION_FAILED;
    PreCallRecordCreateInstance(chain_info);
    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
    // Down-chain creation failed: record nothing and propagate the error.
    if (result != VK_SUCCESS) return result;
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(*pInstance), instance_layer_data_map);
    instance_data->instance = *pInstance;
    layer_init_instance_dispatch_table(*pInstance, &instance_data->dispatch_table, fpGetInstanceProcAddr);
    instance_data->report_data = debug_utils_create_instance(
        &instance_data->dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
    // Default to Vulkan 1.0 when the app supplied no VkApplicationInfo.
    instance_data->api_version = instance_data->extensions.InitFromInstanceCreateInfo(
        (pCreateInfo->pApplicationInfo ? pCreateInfo->pApplicationInfo->apiVersion : VK_API_VERSION_1_0), pCreateInfo);
    InitCoreValidation(instance_data, pAllocator);
    PostCallValidateCreateInstance(pCreateInfo);
    PostCallRecordCreateInstance(instance_data, pCreateInfo);
    return result;
}
// Tear down this layer's instance state: destroy any logging callbacks this
// layer registered (newest first), destroy the report data, then release the
// per-instance layer-data slot keyed by `key`.
static void PostCallRecordDestroyInstance(instance_layer_data *instance_data, const VkAllocationCallbacks *pAllocator,
                                          dispatch_key key) {
    while (!instance_data->logging_messenger.empty()) {
        layer_destroy_messenger_callback(instance_data->report_data, instance_data->logging_messenger.back(), pAllocator);
        instance_data->logging_messenger.pop_back();
    }
    while (!instance_data->logging_callback.empty()) {
        layer_destroy_report_callback(instance_data->report_data, instance_data->logging_callback.back(), pAllocator);
        instance_data->logging_callback.pop_back();
    }
    layer_debug_utils_destroy_instance(instance_data->report_data);
    FreeLayerDataPtr(key, instance_layer_data_map);
}
// Hook DestroyInstance to remove tableInstanceMap entry
VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
    // TODOSC : Shouldn't need any customization here
    dispatch_key key = get_dispatch_key(instance);
    // TBD: Need any locking this early, in case this function is called at the
    // same time by more than one thread?
    instance_layer_data *instance_data = GetLayerDataPtr(key, instance_layer_data_map);
    // Call down the chain first; this layer's bookkeeping is torn down afterwards
    // under the global lock.
    instance_data->dispatch_table.DestroyInstance(instance, pAllocator);
    lock_guard_t lock(global_lock);
    PostCallRecordDestroyInstance(instance_data, pAllocator, key);
}
// Report an error when `requested_queue_family` is not below the queue-family
// count previously obtained for this physical device. Returns true if the
// error was flagged. (A count of 0 -- properties never queried -- always fails.)
static bool ValidatePhysicalDeviceQueueFamily(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
                                              uint32_t requested_queue_family, std::string err_code, const char *cmd_name,
                                              const char *queue_family_var_name) {
    // In-range index: nothing to report.
    if (requested_queue_family < pd_state->queue_family_count) {
        return false;
    }
    const char *conditional_ext_cmd = instance_data->extensions.vk_khr_get_physical_device_properties_2
                                          ? " or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]"
                                          : "";
    const std::string count_note = (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState)
                                       ? "the pQueueFamilyPropertyCount was never obtained"
                                       : "i.e. is not less than " + std::to_string(pd_state->queue_family_count);
    return log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                   HandleToUint64(pd_state->phys_device), err_code,
                   "%s: %s (= %" PRIu32
                   ") is not less than any previously obtained pQueueFamilyPropertyCount from "
                   "vkGetPhysicalDeviceQueueFamilyProperties%s (%s).",
                   cmd_name, queue_family_var_name, requested_queue_family, conditional_ext_cmd, count_note.c_str());
}
// Verify VkDeviceQueueCreateInfos
// For each entry: (1) the queueFamilyIndex must be below the previously queried
// family count; (2) the requested queueCount must not exceed the queueCount
// reported for that family (when its properties were actually obtained).
// Returns true if any error was flagged.
static bool ValidateDeviceQueueCreateInfos(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
                                           uint32_t info_count, const VkDeviceQueueCreateInfo *infos) {
    bool skip = false;
    for (uint32_t i = 0; i < info_count; ++i) {
        const auto requested_queue_family = infos[i].queueFamilyIndex;
        // Verify that requested queue family is known to be valid at this point in time
        std::string queue_family_var_name = "pCreateInfo->pQueueCreateInfos[" + std::to_string(i) + "].queueFamilyIndex";
        skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, requested_queue_family,
                                                  "VUID-VkDeviceQueueCreateInfo-queueFamilyIndex-00381", "vkCreateDevice",
                                                  queue_family_var_name.c_str());
        // Verify that requested queue count of queue family is known to be valid at this point in time
        if (requested_queue_family < pd_state->queue_family_count) {
            const auto requested_queue_count = infos[i].queueCount;
            const auto queue_family_props_count = pd_state->queue_family_properties.size();
            // The family index may be below the count yet have no cached properties,
            // when only the count (not the property array) was queried.
            const bool queue_family_has_props = requested_queue_family < queue_family_props_count;
            const char *conditional_ext_cmd = instance_data->extensions.vk_khr_get_physical_device_properties_2
                                                  ? " or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]"
                                                  : "";
            std::string count_note =
                !queue_family_has_props
                    ? "the pQueueFamilyProperties[" + std::to_string(requested_queue_family) + "] was never obtained"
                    : "i.e. is not less than or equal to " +
                          std::to_string(pd_state->queue_family_properties[requested_queue_family].queueCount);
            if (!queue_family_has_props ||
                requested_queue_count > pd_state->queue_family_properties[requested_queue_family].queueCount) {
                skip |= log_msg(
                    instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                    HandleToUint64(pd_state->phys_device), "VUID-VkDeviceQueueCreateInfo-queueCount-00382",
                    "vkCreateDevice: pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueCount (=%" PRIu32
                    ") is not less than or equal to available queue count for this pCreateInfo->pQueueCreateInfos[%" PRIu32
                    "].queueFamilyIndex} (=%" PRIu32 ") obtained previously from vkGetPhysicalDeviceQueueFamilyProperties%s (%s).",
                    i, requested_queue_count, i, requested_queue_family, conditional_ext_cmd, count_note.c_str());
            }
        }
    }
    return skip;
}
// Pre-vkCreateDevice validation.
// - Verifies the physical device was obtained via vkEnumeratePhysicalDevices.
// - Resolves the enabled-features pointer (out-param): if pEnabledFeatures was
//   null, look for VkPhysicalDeviceFeatures2KHR on the pNext chain.
// - Validates each VkDeviceQueueCreateInfo against the cached queue-family data.
// Returns true if any error was flagged.
static bool PreCallValidateCreateDevice(instance_layer_data *instance_data, const VkPhysicalDeviceFeatures **enabled_features_found,
                                        VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo) {
    bool skip = false;
    auto pd_state = GetPhysicalDeviceState(instance_data, gpu);
    // TODO: object_tracker should perhaps do this instead
    // and it does not seem to currently work anyway -- the loader just crashes before this point
    if (!pd_state) {  // reuse the lookup above instead of querying the map a second time
        skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                        0, kVUID_Core_DevLimit_MustQueryCount,
                        "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
    }
    // The enabled features can come from either pEnabledFeatures, or from the pNext chain
    // TODO: Validate "VUID-VkDeviceCreateInfo-pNext-00373" here, can't have non-null pEnabledFeatures & GPDF2 in pNext chain
    if (nullptr == *enabled_features_found) {
        const auto *features2 = lvl_find_in_chain<VkPhysicalDeviceFeatures2KHR>(pCreateInfo->pNext);
        if (features2) {
            *enabled_features_found = &(features2->features);
        }
    }
    // Guard the queue-create-info checks: ValidateDeviceQueueCreateInfos dereferences
    // pd_state unconditionally, which would crash when the app never enumerated
    // physical devices (that error was already reported above).
    if (pd_state) {
        skip |= ValidateDeviceQueueCreateInfos(instance_data, pd_state, pCreateInfo->queueCreateInfoCount,
                                               pCreateInfo->pQueueCreateInfos);
    }
    return skip;
}
// Step past this layer's link in the device create chain so the next layer
// sees its own entry when we call down.
static void PreCallRecordCreateDevice(VkLayerDeviceCreateInfo *chain_info) {
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
}
// Post-create validation: delegates layer-ordering checks on the device create info.
static void PostCallValidateCreateDevice(const VkDeviceCreateInfo *pCreateInfo) { ValidateLayerOrdering(*pCreateInfo); }
// Initialize per-device layer state after the driver has created the device:
// dispatch table, report data, device properties/limits, queue-family
// properties, enabled features (core plus any feature structs found on the
// pNext chain), and extension-specific property queries.
static void PostCallRecordCreateDevice(instance_layer_data *instance_data, const VkPhysicalDeviceFeatures *enabled_features_found,
                                       PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr, VkPhysicalDevice gpu,
                                       const VkDeviceCreateInfo *pCreateInfo, VkDevice *pDevice) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
    device_data->instance_data = instance_data;
    // Setup device dispatch table
    layer_init_device_dispatch_table(*pDevice, &device_data->dispatch_table, fpGetDeviceProcAddr);
    device_data->device = *pDevice;
    // Save PhysicalDevice handle
    device_data->physical_device = gpu;
    device_data->report_data = layer_debug_utils_create_device(instance_data->report_data, *pDevice);
    // Get physical device limits for this device
    instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &(device_data->phys_dev_properties.properties));
    // Setup the validation tables based on the application API version from the instance and the capabilities of the device driver.
    uint32_t effective_api_version = std::min(device_data->phys_dev_properties.properties.apiVersion, instance_data->api_version);
    device_data->api_version =
        device_data->extensions.InitFromDeviceCreateInfo(&instance_data->extensions, effective_api_version, pCreateInfo);
    // Standard two-call pattern: fetch count, then fill the property array.
    uint32_t count;
    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
    device_data->phys_dev_properties.queue_family_properties.resize(count);
    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(
        gpu, &count, &device_data->phys_dev_properties.queue_family_properties[0]);
    // TODO: device limits should make sure these are compatible
    if (enabled_features_found) {
        device_data->enabled_features.core = *enabled_features_found;
    }
    // Cache each extension's feature struct if the app chained it on pNext.
    const auto *descriptor_indexing_features = lvl_find_in_chain<VkPhysicalDeviceDescriptorIndexingFeaturesEXT>(pCreateInfo->pNext);
    if (descriptor_indexing_features) {
        device_data->enabled_features.descriptor_indexing = *descriptor_indexing_features;
    }
    const auto *eight_bit_storage_features = lvl_find_in_chain<VkPhysicalDevice8BitStorageFeaturesKHR>(pCreateInfo->pNext);
    if (eight_bit_storage_features) {
        device_data->enabled_features.eight_bit_storage = *eight_bit_storage_features;
    }
    const auto *exclusive_scissor_features = lvl_find_in_chain<VkPhysicalDeviceExclusiveScissorFeaturesNV>(pCreateInfo->pNext);
    if (exclusive_scissor_features) {
        device_data->enabled_features.exclusive_scissor = *exclusive_scissor_features;
    }
    const auto *shading_rate_image_features = lvl_find_in_chain<VkPhysicalDeviceShadingRateImageFeaturesNV>(pCreateInfo->pNext);
    if (shading_rate_image_features) {
        device_data->enabled_features.shading_rate_image = *shading_rate_image_features;
    }
    const auto *mesh_shader_features = lvl_find_in_chain<VkPhysicalDeviceMeshShaderFeaturesNV>(pCreateInfo->pNext);
    if (mesh_shader_features) {
        device_data->enabled_features.mesh_shader = *mesh_shader_features;
    }
    const auto *inline_uniform_block_features = lvl_find_in_chain<VkPhysicalDeviceInlineUniformBlockFeaturesEXT>(pCreateInfo->pNext);
    if (inline_uniform_block_features) {
        device_data->enabled_features.inline_uniform_block = *inline_uniform_block_features;
    }
    // Store physical device properties and physical device mem limits into device layer_data structs
    instance_data->dispatch_table.GetPhysicalDeviceMemoryProperties(gpu, &device_data->phys_dev_mem_props);
    instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &device_data->phys_dev_props);
    // For each enabled extension with limits we care about, query its property
    // struct via GetPhysicalDeviceProperties2KHR and cache the result.
    if (device_data->extensions.vk_khr_push_descriptor) {
        // Get the needed push_descriptor limits
        auto push_descriptor_prop = lvl_init_struct<VkPhysicalDevicePushDescriptorPropertiesKHR>();
        auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&push_descriptor_prop);
        instance_data->dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
        device_data->phys_dev_ext_props.max_push_descriptors = push_descriptor_prop.maxPushDescriptors;
    }
    if (device_data->extensions.vk_ext_descriptor_indexing) {
        // Get the needed descriptor_indexing limits
        auto descriptor_indexing_props = lvl_init_struct<VkPhysicalDeviceDescriptorIndexingPropertiesEXT>();
        auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&descriptor_indexing_props);
        instance_data->dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
        device_data->phys_dev_ext_props.descriptor_indexing_props = descriptor_indexing_props;
    }
    if (device_data->extensions.vk_nv_shading_rate_image) {
        // Get the needed shading rate image limits
        auto shading_rate_image_props = lvl_init_struct<VkPhysicalDeviceShadingRateImagePropertiesNV>();
        auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&shading_rate_image_props);
        instance_data->dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
        device_data->phys_dev_ext_props.shading_rate_image_props = shading_rate_image_props;
    }
    if (device_data->extensions.vk_nv_mesh_shader) {
        // Get the needed mesh shader limits
        auto mesh_shader_props = lvl_init_struct<VkPhysicalDeviceMeshShaderPropertiesNV>();
        auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&mesh_shader_props);
        instance_data->dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
        device_data->phys_dev_ext_props.mesh_shader_props = mesh_shader_props;
    }
    if (device_data->extensions.vk_ext_inline_uniform_block) {
        // Get the needed inline uniform block limits
        auto inline_uniform_block_props = lvl_init_struct<VkPhysicalDeviceInlineUniformBlockPropertiesEXT>();
        auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&inline_uniform_block_props);
        instance_data->dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
        device_data->phys_dev_ext_props.inline_uniform_block_props = inline_uniform_block_props;
    }
}
// Layer intercept for vkCreateDevice: validates the create info under the
// global lock, calls down the chain with the lock RELEASED (the driver call
// may be slow and must not hold our lock), then records per-device state.
VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(gpu), instance_layer_data_map);
    unique_lock_t lock(global_lock);
    // May be redirected to a VkPhysicalDeviceFeatures2KHR found on the pNext chain.
    const VkPhysicalDeviceFeatures *enabled_features_found = pCreateInfo->pEnabledFeatures;
    bool skip = PreCallValidateCreateDevice(instance_data, &enabled_features_found, gpu, pCreateInfo);
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(instance_data->instance, "vkCreateDevice");
    if (fpCreateDevice == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    // Advance the link info for the next element on the chain
    PreCallRecordCreateDevice(chain_info);
    lock.unlock();
    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
    if (result != VK_SUCCESS) {
        return result;
    }
    // Re-acquire the lock before touching layer state for the new device.
    lock.lock();
    PostCallRecordCreateDevice(instance_data, enabled_features_found, fpGetDeviceProcAddr, gpu, pCreateInfo, pDevice);
    PostCallValidateCreateDevice(pCreateInfo);
    lock.unlock();
    return result;
}
// Clear all per-device tracking state before the driver destroys the device.
// NOTE(review): teardown order matters -- DeletePools() must run before the
// setMap.empty() assertion, since destroying pools removes their sets from
// setMap -- so the body is left untouched.
static void PreCallRecordDestroyDevice(layer_data *dev_data, VkDevice device) {
    dev_data->pipelineMap.clear();
    dev_data->renderPassMap.clear();
    // Command buffer nodes are heap-allocated; free them before clearing the map.
    for (auto ii = dev_data->commandBufferMap.begin(); ii != dev_data->commandBufferMap.end(); ++ii) {
        delete (*ii).second;
    }
    dev_data->commandBufferMap.clear();
    // This will also delete all sets in the pool & remove them from setMap
    DeletePools(dev_data);
    // All sets should be removed
    assert(dev_data->setMap.empty());
    dev_data->descriptorSetLayoutMap.clear();
    dev_data->imageViewMap.clear();
    dev_data->imageMap.clear();
    dev_data->imageSubresourceMap.clear();
    dev_data->imageLayoutMap.clear();
    dev_data->bufferViewMap.clear();
    dev_data->bufferMap.clear();
    // Queues persist until device is destroyed
    dev_data->queueMap.clear();
    // Report any memory leaks
    layer_debug_utils_destroy_device(device);
}
// Free the per-device layer-data slot associated with this dispatch key.
static void PostCallRecordDestroyDevice(const dispatch_key &key) { FreeLayerDataPtr(key, layer_data_map); }
// Layer intercept for vkDestroyDevice: clears layer tracking state under the
// global lock, calls down the chain with the lock released, then frees the
// layer-data slot.
VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
    // TODOSC : Shouldn't need any customization here
    dispatch_key key = get_dispatch_key(device);
    layer_data *dev_data = GetLayerDataPtr(key, layer_data_map);
    unique_lock_t lock(global_lock);
    PreCallRecordDestroyDevice(dev_data, device);
    // Drop the lock around the down-chain driver call.
    lock.unlock();
#if DISPATCH_MAP_DEBUG
    fprintf(stderr, "Device: 0x%p, key: 0x%p\n", device, key);
#endif
    dev_data->dispatch_table.DestroyDevice(device, pAllocator);
    // Free all the memory
    lock.lock();
    PostCallRecordDestroyDevice(key);
}
// Instance-level extensions this layer exposes (currently only VK_EXT_debug_report).
static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
// For given stage mask, if Geometry shader stage is on w/o GS being enabled, report geo_error_id
// and if Tessellation Control or Evaluation shader stages are on w/o TS being enabled, report tess_error_id.
// Similarly for mesh and task shaders.
static bool ValidateStageMaskGsTsEnables(const layer_data *dev_data, VkPipelineStageFlags stageMask, const char *caller,
                                         std::string geo_error_id, std::string tess_error_id, std::string mesh_error_id, std::string task_error_id) {
    bool skip = false;
    // Flags an error when any of `stage_bits` appears in stageMask while the
    // backing device feature is disabled. The message receives `caller` as %s.
    auto check_stage = [&](bool feature_enabled, VkPipelineStageFlags stage_bits, const std::string &error_id,
                           const char *message) {
        if (!feature_enabled && (stageMask & stage_bits)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            error_id, message, caller);
        }
    };
    check_stage(dev_data->enabled_features.core.geometryShader, VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, geo_error_id,
                "%s call includes a stageMask with VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT bit set when device does not have "
                "geometryShader feature enabled.");
    check_stage(dev_data->enabled_features.core.tessellationShader,
                VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,
                tess_error_id,
                "%s call includes a stageMask with VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT and/or "
                "VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT bit(s) set when device does not have "
                "tessellationShader feature enabled.");
    check_stage(dev_data->enabled_features.mesh_shader.meshShader, VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV, mesh_error_id,
                "%s call includes a stageMask with VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV bit set when device does not have "
                "VkPhysicalDeviceMeshShaderFeaturesNV::meshShader feature enabled.");
    check_stage(dev_data->enabled_features.mesh_shader.taskShader, VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV, task_error_id,
                "%s call includes a stageMask with VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV bit set when device does not have "
                "VkPhysicalDeviceMeshShaderFeaturesNV::taskShader feature enabled.");
    return skip;
}
// Loop through bound objects and increment their in_use counts.
static void IncrementBoundObjects(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
    for (const auto &obj : cb_node->object_bindings) {
        BASE_NODE *state = GetStateStructPtrFromObject(dev_data, obj);
        // Objects that no longer have tracked state are skipped.
        if (state != nullptr) {
            state->in_use.fetch_add(1);
        }
    }
}
// Track which resources are in-flight by atomically incrementing their "in_use" count
static void IncrementResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
    cb_node->submitCount++;
    cb_node->in_use.fetch_add(1);
    // Generic objects bound to the cmd buffer first; special-case objects follow.
    IncrementBoundObjects(dev_data, cb_node);
    // TODO : We should be able to remove the NULL look-up checks from the code below as long as
    //  all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
    //  should then be flagged prior to calling this function
    for (const auto &draw_data : cb_node->draw_data) {
        for (const auto &vertex_binding : draw_data.vertex_buffer_bindings) {
            if (auto buffer_state = GetBufferState(dev_data, vertex_binding.buffer)) {
                buffer_state->in_use.fetch_add(1);
            }
        }
    }
    for (const auto &event : cb_node->writeEventsBeforeWait) {
        if (auto event_state = GetEventNode(dev_data, event)) {
            event_state->write_in_use++;
        }
    }
}
// Note: This function assumes that the global lock is held by the calling thread.
// For the given queue, verify the queue state up to the given seq number.
// Currently the only check is to make sure that if there are events to be waited on prior to
//  a QueryReset, make sure that all such events have been signalled.
// Implementation: a worklist traversal across queues. Cross-queue semaphore
// waits pull the waited-on queue into the worklist with a new target sequence,
// and done_seqs records how far each queue has already been walked so no
// submission is visited twice.
// NOTE(review): `skip` is never set in the body below, so this currently
// always returns false; only the traversal bookkeeping is performed.
static bool VerifyQueueStateToSeq(layer_data *dev_data, QUEUE_STATE *initial_queue, uint64_t initial_seq) {
    bool skip = false;
    // sequence number we want to validate up to, per queue
    std::unordered_map<QUEUE_STATE *, uint64_t> target_seqs{{initial_queue, initial_seq}};
    // sequence number we've completed validation for, per queue
    std::unordered_map<QUEUE_STATE *, uint64_t> done_seqs;
    std::vector<QUEUE_STATE *> worklist{initial_queue};
    while (worklist.size()) {
        auto queue = worklist.back();
        worklist.pop_back();
        auto target_seq = target_seqs[queue];
        auto seq = std::max(done_seqs[queue], queue->seq);
        auto sub_it = queue->submissions.begin() + int(seq - queue->seq);  // seq >= queue->seq
        for (; seq < target_seq; ++sub_it, ++seq) {
            for (auto &wait : sub_it->waitSemaphores) {
                auto other_queue = GetQueueState(dev_data, wait.queue);
                if (other_queue == queue) continue;  // semaphores /always/ point backwards, so no point here.
                auto other_target_seq = std::max(target_seqs[other_queue], wait.seq);
                auto other_done_seq = std::max(done_seqs[other_queue], other_queue->seq);
                // if this wait is for another queue, and covers new sequence
                // numbers beyond what we've already validated, mark the new
                // target seq and (possibly-re)add the queue to the worklist.
                if (other_done_seq < other_target_seq) {
                    target_seqs[other_queue] = other_target_seq;
                    worklist.push_back(other_queue);
                }
            }
        }
        // finally mark the point we've now validated this queue to.
        done_seqs[queue] = seq;
    }
    return skip;
}
// When the given fence is retired, verify outstanding queue operations through the point of the fence
static bool VerifyQueueStateToFence(layer_data *dev_data, VkFence fence) {
    auto fence_state = GetFenceNode(dev_data, fence);
    // Only internally-scoped fences with a recorded signaling queue can be verified.
    if (!fence_state || fence_state->scope != kSyncScopeInternal || VK_NULL_HANDLE == fence_state->signaler.first) {
        return false;
    }
    return VerifyQueueStateToSeq(dev_data, GetQueueState(dev_data, fence_state->signaler.first), fence_state->signaler.second);
}
// Decrement in-use count for objects bound to command buffer
static void DecrementBoundResources(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
    // Undo the per-object in_use increments made at submission time.
    for (const auto &obj : cb_node->object_bindings) {
        BASE_NODE *state = GetStateStructPtrFromObject(dev_data, obj);
        if (state != nullptr) {
            state->in_use.fetch_sub(1);
        }
    }
}
// Retire completed work on pQueue up to (but not including) sequence `seq`:
// release semaphore/command-buffer/resource in_use counts, propagate per-CB
// query and event state to the device-level maps, mark fences retired, and
// recursively retire other queues up to the highest semaphore-wait sequence
// observed. Assumes the global lock is held.
static void RetireWorkOnQueue(layer_data *dev_data, QUEUE_STATE *pQueue, uint64_t seq) {
    // Highest waited-on sequence seen per other queue; processed at the end.
    std::unordered_map<VkQueue, uint64_t> otherQueueSeqs;
    // Roll this queue forward, one submission at a time.
    while (pQueue->seq < seq) {
        auto &submission = pQueue->submissions.front();
        for (auto &wait : submission.waitSemaphores) {
            auto pSemaphore = GetSemaphoreNode(dev_data, wait.semaphore);
            if (pSemaphore) {
                pSemaphore->in_use.fetch_sub(1);
            }
            auto &lastSeq = otherQueueSeqs[wait.queue];
            lastSeq = std::max(lastSeq, wait.seq);
        }
        for (auto &semaphore : submission.signalSemaphores) {
            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
            if (pSemaphore) {
                pSemaphore->in_use.fetch_sub(1);
            }
        }
        for (auto &semaphore : submission.externalSemaphores) {
            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
            if (pSemaphore) {
                pSemaphore->in_use.fetch_sub(1);
            }
        }
        for (auto cb : submission.cbs) {
            auto cb_node = GetCBNode(dev_data, cb);
            if (!cb_node) {
                continue;
            }
            // First perform decrement on general case bound objects
            DecrementBoundResources(dev_data, cb_node);
            for (auto draw_data_element : cb_node->draw_data) {
                for (auto &vertex_buffer_binding : draw_data_element.vertex_buffer_bindings) {
                    auto buffer_state = GetBufferState(dev_data, vertex_buffer_binding.buffer);
                    if (buffer_state) {
                        buffer_state->in_use.fetch_sub(1);
                    }
                }
            }
            for (auto event : cb_node->writeEventsBeforeWait) {
                auto eventNode = dev_data->eventMap.find(event);
                if (eventNode != dev_data->eventMap.end()) {
                    eventNode->second.write_in_use--;
                }
            }
            // Publish the command buffer's final query/event state device-wide.
            for (auto queryStatePair : cb_node->queryToStateMap) {
                dev_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
            }
            for (auto eventStagePair : cb_node->eventToStageMap) {
                dev_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
            }
            cb_node->in_use.fetch_sub(1);
        }
        auto pFence = GetFenceNode(dev_data, submission.fence);
        if (pFence && pFence->scope == kSyncScopeInternal) {
            pFence->state = FENCE_RETIRED;
        }
        pQueue->submissions.pop_front();
        pQueue->seq++;
    }
    // Roll other queues forward to the highest seq we saw a wait for
    for (auto qs : otherQueueSeqs) {
        RetireWorkOnQueue(dev_data, GetQueueState(dev_data, qs.first), qs.second);
    }
}
// Submit a fence to a queue, delimiting previous fences and previous untracked
// work by it.
static void SubmitFence(QUEUE_STATE *pQueue, FENCE_NODE *pFence, uint64_t submitCount) {
    // The fence signals once the queue retires the last of the submissions being enqueued.
    const uint64_t signal_seq = pQueue->seq + pQueue->submissions.size() + submitCount;
    pFence->state = FENCE_INFLIGHT;
    pFence->signaler.first = pQueue->queue;
    pFence->signaler.second = signal_seq;
}
// Flag an error if a command buffer lacking SIMULTANEOUS_USE is (or would be) executing
// more than once at a time. Returns true if validation should skip the call.
static bool ValidateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB, int current_submit_count) {
    bool skip = false;
    const bool allows_simultaneous = (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) != 0;
    const bool used_more_than_once = pCB->in_use.load() || (current_submit_count > 1);
    if (used_more_than_once && !allows_simultaneous) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        "VUID-vkQueueSubmit-pCommandBuffers-00071",
                        "Command Buffer 0x%" PRIx64 " is already in use and is not marked for simultaneous use.",
                        HandleToUint64(pCB->commandBuffer));
    }
    return skip;
}
// Validate that cb_state is legal to submit from 'call_source': not a one-time-submit CB
// being reused, and in a fully-recorded state (not new, invalid, or still recording).
// 'vu_id' is the VUID reported for the unrecorded-CB case. Returns true to skip the call.
static bool ValidateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const char *call_source,
                                       int current_submit_count, std::string vu_id) {
    bool skip = false;
    // Honor the layer setting that disables command-buffer state validation.
    if (dev_data->instance_data->disabled.command_buffer_state) return skip;
    // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
    if ((cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) &&
        (cb_state->submitCount + current_submit_count > 1)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        kVUID_Core_DrawState_CommandBufferSingleSubmitViolation,
                        "Commandbuffer 0x%" PRIx64
                        " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT set, but has been submitted 0x%" PRIxLEAST64
                        " times.",
                        HandleToUint64(cb_state->commandBuffer), cb_state->submitCount + current_submit_count);
    }
    // Validate that cmd buffers have been updated
    switch (cb_state->state) {
        // Invalid: a resource recorded into the CB has since been destroyed/changed.
        case CB_INVALID_INCOMPLETE:
        case CB_INVALID_COMPLETE:
            skip |= ReportInvalidCommandBuffer(dev_data, cb_state, call_source);
            break;

        // New: allocated (or reset) but never recorded.
        case CB_NEW:
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)(cb_state->commandBuffer), vu_id,
                            "Command buffer 0x%" PRIx64 " used in the call to %s is unrecorded and contains no commands.",
                            HandleToUint64(cb_state->commandBuffer), call_source);
            break;

        // Recording: Begin was called but End was not.
        case CB_RECORDING:
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), kVUID_Core_DrawState_NoEndCommandBuffer,
                            "You must call vkEndCommandBuffer() on command buffer 0x%" PRIx64 " before this call to %s!",
                            HandleToUint64(cb_state->commandBuffer), call_source);
            break;

        default: /* recorded */
            break;
    }
    return skip;
}
// Report any vertex buffer recorded into this command buffer whose state object has
// since been destroyed. Returns true if the submit should be skipped.
static bool ValidateResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
    // TODO : We should be able to remove the NULL look-up checks from the code below as long as
    // all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
    // should then be flagged prior to calling this function
    bool skip = false;
    for (const auto &draw_data : cb_node->draw_data) {
        for (const auto &binding : draw_data.vertex_buffer_bindings) {
            const bool buffer_known = (GetBufferState(dev_data, binding.buffer) != nullptr);
            if (!buffer_known && (binding.buffer != VK_NULL_HANDLE)) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                HandleToUint64(binding.buffer), kVUID_Core_DrawState_InvalidBuffer,
                                "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".",
                                HandleToUint64(binding.buffer));
            }
        }
    }
    return skip;
}
// Check that the queue family index of 'queue' matches one of the entries in pQueueFamilyIndices
bool ValidImageBufferQueue(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, const VK_OBJECT *object, VkQueue queue, uint32_t count,
                           const uint32_t *indices) {
    bool skip = false;
    auto queue_state = GetQueueState(dev_data, queue);
    if (!queue_state) {
        return skip;
    }
    // Scan the object's concurrent-sharing family list for this queue's family.
    bool family_allowed = false;
    for (uint32_t i = 0; i < count; ++i) {
        if (indices[i] == queue_state->queueFamilyIndex) {
            family_allowed = true;
            break;
        }
    }
    if (!family_allowed) {
        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[object->type],
                       object->handle, kVUID_Core_DrawState_InvalidQueueFamily,
                       "vkQueueSubmit: Command buffer 0x%" PRIx64 " contains %s 0x%" PRIx64
                       " which was not created allowing concurrent access to this queue family %d.",
                       HandleToUint64(cb_node->commandBuffer), object_string[object->type], object->handle,
                       queue_state->queueFamilyIndex);
    }
    return skip;
}
// Validate that queueFamilyIndices of primary command buffers match this queue
// Secondary command buffers were previously validated in vkCmdExecuteCommands().
// Also checks that CONCURRENT-sharing images/buffers bound in the CB list this
// queue's family. Returns true if the submit should be skipped.
static bool ValidateQueueFamilyIndices(layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkQueue queue) {
    bool skip = false;
    auto pPool = GetCommandPoolNode(dev_data, pCB->createInfo.commandPool);
    auto queue_state = GetQueueState(dev_data, queue);
    if (pPool && queue_state) {
        // A command buffer may only be submitted on a queue from its pool's family.
        if (pPool->queueFamilyIndex != queue_state->queueFamilyIndex) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCB->commandBuffer), "VUID-vkQueueSubmit-pCommandBuffers-00074",
                            "vkQueueSubmit: Primary command buffer 0x%" PRIx64
                            " created in queue family %d is being submitted on queue 0x%" PRIx64 " from queue family %d.",
                            HandleToUint64(pCB->commandBuffer), pPool->queueFamilyIndex, HandleToUint64(queue),
                            queue_state->queueFamilyIndex);
        }
        // Ensure that any bound images or buffers created with SHARING_MODE_CONCURRENT have access to the current queue family
        for (auto object : pCB->object_bindings) {
            if (object.type == kVulkanObjectTypeImage) {
                auto image_state = GetImageState(dev_data, reinterpret_cast<VkImage &>(object.handle));
                if (image_state && image_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
                    skip |= ValidImageBufferQueue(dev_data, pCB, &object, queue, image_state->createInfo.queueFamilyIndexCount,
                                                  image_state->createInfo.pQueueFamilyIndices);
                }
            } else if (object.type == kVulkanObjectTypeBuffer) {
                auto buffer_state = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(object.handle));
                if (buffer_state && buffer_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
                    skip |= ValidImageBufferQueue(dev_data, pCB, &object, queue, buffer_state->createInfo.queueFamilyIndexCount,
                                                  buffer_state->createInfo.pQueueFamilyIndices);
                }
            }
        }
    }
    return skip;
}
// Validate a primary command buffer (and its linked secondaries) for submission:
// simultaneous-use rules, destroyed-resource references, queued queue-family-ownership
// transfers, and overall recorded state. Returns true if the submit should be skipped.
static bool ValidatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, int current_submit_count,
                                              QFOTransferCBScoreboards<VkImageMemoryBarrier> *qfo_image_scoreboards,
                                              QFOTransferCBScoreboards<VkBufferMemoryBarrier> *qfo_buffer_scoreboards) {
    // Track in-use for resources off of primary and any secondary CBs
    bool skip = false;
    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
    // on device
    skip |= ValidateCommandBufferSimultaneousUse(dev_data, pCB, current_submit_count);
    skip |= ValidateResources(dev_data, pCB);
    skip |= ValidateQueuedQFOTransfers(dev_data, pCB, qfo_image_scoreboards, qfo_buffer_scoreboards);
    for (auto pSubCB : pCB->linkedCommandBuffers) {
        skip |= ValidateResources(dev_data, pSubCB);
        skip |= ValidateQueuedQFOTransfers(dev_data, pSubCB, qfo_image_scoreboards, qfo_buffer_scoreboards);
        // TODO: replace with InvalidateCommandBuffers() at recording.
        // A secondary without SIMULTANEOUS_USE that was re-recorded into a different
        // primary cannot also be submitted via this one.
        if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
            !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    "VUID-vkQueueSubmit-pCommandBuffers-00073",
                    "Commandbuffer 0x%" PRIx64 " was submitted with secondary buffer 0x%" PRIx64
                    " but that buffer has subsequently been bound to primary cmd buffer 0x%" PRIx64
                    " and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
                    HandleToUint64(pCB->commandBuffer), HandleToUint64(pSubCB->commandBuffer),
                    HandleToUint64(pSubCB->primaryCommandBuffer));
        }
    }
    skip |= ValidateCommandBufferState(dev_data, pCB, "vkQueueSubmit()", current_submit_count,
                                       "VUID-vkQueueSubmit-pCommandBuffers-00072");
    return skip;
}
// Validate that a fence passed to a queue-submission call is usable: an internally
// scoped fence must be neither in-flight nor still signaled (un-reset).
// Externally scoped fences cannot be tracked and are not checked.
static bool ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence) {
    bool skip = false;
    if (!pFence || (pFence->scope != kSyncScopeInternal)) {
        return skip;
    }
    switch (pFence->state) {
        case FENCE_INFLIGHT:
            // TODO: opportunities for "VUID-vkQueueSubmit-fence-00064", "VUID-vkQueueBindSparse-fence-01114",
            // "VUID-vkAcquireNextImageKHR-fence-01287"
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                            HandleToUint64(pFence->fence), kVUID_Core_DrawState_InvalidFence,
                            "Fence 0x%" PRIx64 " is already in use by another submission.", HandleToUint64(pFence->fence));
            break;
        case FENCE_RETIRED:
            // TODO: opportunities for "VUID-vkQueueSubmit-fence-00063", "VUID-vkQueueBindSparse-fence-01113",
            // "VUID-vkAcquireNextImageKHR-fence-01287"
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                            HandleToUint64(pFence->fence), kVUID_Core_MemTrack_FenceState,
                            "Fence 0x%" PRIx64 " submitted in SIGNALED state. Fences must be reset before being submitted",
                            HandleToUint64(pFence->fence));
            break;
        default:
            break;
    }
    return skip;
}
// Record-phase bookkeeping for vkQueueSubmit: mark the fence in-flight, update semaphore
// signal/wait tracking, take in-use references on submitted command buffers and their
// resources, and append a submission record per VkSubmitInfo to the queue. If an external
// fence or semaphore is signaled, work up to that point is retired early since the
// corresponding wait will never be observed by this layer.
static void PostCallRecordQueueSubmit(layer_data *dev_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
                                      VkFence fence) {
    uint64_t early_retire_seq = 0;
    auto pQueue = GetQueueState(dev_data, queue);
    auto pFence = GetFenceNode(dev_data, fence);
    if (pFence) {
        if (pFence->scope == kSyncScopeInternal) {
            // Mark fence in use
            SubmitFence(pQueue, pFence, std::max(1u, submitCount));
            if (!submitCount) {
                // If no submissions, but just dropping a fence on the end of the queue,
                // record an empty submission with just the fence, so we can determine
                // its completion.
                pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(),
                                                 std::vector<VkSemaphore>(), std::vector<VkSemaphore>(), fence);
            }
        } else {
            // Retire work up until this fence early, we will not see the wait that corresponds to this signal
            early_retire_seq = pQueue->seq + pQueue->submissions.size();
            if (!dev_data->external_sync_warning) {
                dev_data->external_sync_warning = true;
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                        HandleToUint64(fence), kVUID_Core_DrawState_QueueForwardProgress,
                        "vkQueueSubmit(): Signaling external fence 0x%" PRIx64 " on queue 0x%" PRIx64
                        " will disable validation of preceding command buffer lifecycle states and the in-use status of associated "
                        "objects.",
                        HandleToUint64(fence), HandleToUint64(queue));
            }
        }
    }
    // Now process each individual submit
    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
        std::vector<VkCommandBuffer> cbs;
        const VkSubmitInfo *submit = &pSubmits[submit_idx];
        vector<SEMAPHORE_WAIT> semaphore_waits;
        vector<VkSemaphore> semaphore_signals;
        vector<VkSemaphore> semaphore_externals;
        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
            VkSemaphore semaphore = submit->pWaitSemaphores[i];
            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
            if (pSemaphore) {
                if (pSemaphore->scope == kSyncScopeInternal) {
                    // Record the wait dependency on the signaling queue, then consume the signal.
                    if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
                        semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
                        pSemaphore->in_use.fetch_add(1);
                    }
                    pSemaphore->signaler.first = VK_NULL_HANDLE;
                    pSemaphore->signaled = false;
                } else {
                    // External semaphore: waiting imports it; temporary scope reverts to internal.
                    semaphore_externals.push_back(semaphore);
                    pSemaphore->in_use.fetch_add(1);
                    if (pSemaphore->scope == kSyncScopeExternalTemporary) {
                        pSemaphore->scope = kSyncScopeInternal;
                    }
                }
            }
        }
        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
            VkSemaphore semaphore = submit->pSignalSemaphores[i];
            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
            if (pSemaphore) {
                if (pSemaphore->scope == kSyncScopeInternal) {
                    // This submission (seq + size + 1) becomes the semaphore's signaler.
                    pSemaphore->signaler.first = queue;
                    pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
                    pSemaphore->signaled = true;
                    pSemaphore->in_use.fetch_add(1);
                    semaphore_signals.push_back(semaphore);
                } else {
                    // Retire work up until this submit early, we will not see the wait that corresponds to this signal
                    early_retire_seq = std::max(early_retire_seq, pQueue->seq + pQueue->submissions.size() + 1);
                    if (!dev_data->external_sync_warning) {
                        dev_data->external_sync_warning = true;
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress,
                                "vkQueueSubmit(): Signaling external semaphore 0x%" PRIx64 " on queue 0x%" PRIx64
                                " will disable validation of preceding command buffer lifecycle states and the in-use status of "
                                "associated objects.",
                                HandleToUint64(semaphore), HandleToUint64(queue));
                    }
                }
            }
        }
        // Gather primary CBs plus their linked secondaries, taking in-use refs and
        // committing their image-layout transitions and pending QFO transfers.
        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
            auto cb_node = GetCBNode(dev_data, submit->pCommandBuffers[i]);
            if (cb_node) {
                cbs.push_back(submit->pCommandBuffers[i]);
                for (auto secondaryCmdBuffer : cb_node->linkedCommandBuffers) {
                    cbs.push_back(secondaryCmdBuffer->commandBuffer);
                    UpdateCmdBufImageLayouts(dev_data, secondaryCmdBuffer);
                    IncrementResources(dev_data, secondaryCmdBuffer);
                    RecordQueuedQFOTransfers(dev_data, secondaryCmdBuffer);
                }
                UpdateCmdBufImageLayouts(dev_data, cb_node);
                IncrementResources(dev_data, cb_node);
                RecordQueuedQFOTransfers(dev_data, cb_node);
            }
        }
        // Only the final submission in the batch carries the fence.
        pQueue->submissions.emplace_back(cbs, semaphore_waits, semaphore_signals, semaphore_externals,
                                         submit_idx == submitCount - 1 ? fence : VK_NULL_HANDLE);
    }
    if (early_retire_seq) {
        RetireWorkOnQueue(dev_data, pQueue, early_retire_seq);
    }
}
// Validation phase of vkQueueSubmit. Checks the fence, simulates semaphore
// signal/unsignal ordering across the whole batch to detect waits that can never
// complete and double-signals, and validates each command buffer (layouts, state,
// queue family, queued submit-time checks). Returns true to skip the driver call.
static bool PreCallValidateQueueSubmit(layer_data *dev_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
                                       VkFence fence) {
    auto pFence = GetFenceNode(dev_data, fence);
    bool skip = ValidateFenceForSubmit(dev_data, pFence);
    if (skip) {
        return true;
    }
    // Simulated semaphore state across the submits in this batch; 'internal' tracks
    // temporary-external semaphores that revert to internal scope when waited on.
    unordered_set<VkSemaphore> signaled_semaphores;
    unordered_set<VkSemaphore> unsignaled_semaphores;
    unordered_set<VkSemaphore> internal_semaphores;
    vector<VkCommandBuffer> current_cmds;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> localImageLayoutMap;
    // Now verify each individual submit
    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
        const VkSubmitInfo *submit = &pSubmits[submit_idx];
        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
            skip |= ValidateStageMaskGsTsEnables(dev_data, submit->pWaitDstStageMask[i], "vkQueueSubmit()",
                                                 "VUID-VkSubmitInfo-pWaitDstStageMask-00076",
                                                 "VUID-VkSubmitInfo-pWaitDstStageMask-00077",
                                                 "VUID-VkSubmitInfo-pWaitDstStageMask-02089",
                                                 "VUID-VkSubmitInfo-pWaitDstStageMask-02090");
            VkSemaphore semaphore = submit->pWaitSemaphores[i];
            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
            if (pSemaphore && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
                // Waiting on a semaphore that is neither signaled now nor signaled
                // earlier in this batch can never complete.
                if (unsignaled_semaphores.count(semaphore) ||
                    (!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled))) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                    HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress,
                                    "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
                                    HandleToUint64(queue), HandleToUint64(semaphore));
                } else {
                    // Simulate the wait consuming the signal.
                    signaled_semaphores.erase(semaphore);
                    unsignaled_semaphores.insert(semaphore);
                }
            }
            if (pSemaphore && pSemaphore->scope == kSyncScopeExternalTemporary) {
                internal_semaphores.insert(semaphore);
            }
        }
        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
            VkSemaphore semaphore = submit->pSignalSemaphores[i];
            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
            if (pSemaphore && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
                // Signaling an already-signaled semaphore (not consumed by any wait) is invalid.
                if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                    HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress,
                                    "Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
                                    " that was previously signaled by queue 0x%" PRIx64
                                    " but has not since been waited on by any queue.",
                                    HandleToUint64(queue), HandleToUint64(semaphore), HandleToUint64(pSemaphore->signaler.first));
                } else {
                    unsignaled_semaphores.erase(semaphore);
                    signaled_semaphores.insert(semaphore);
                }
            }
        }
        QFOTransferCBScoreboards<VkImageMemoryBarrier> qfo_image_scoreboards;
        QFOTransferCBScoreboards<VkBufferMemoryBarrier> qfo_buffer_scoreboards;
        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
            auto cb_node = GetCBNode(dev_data, submit->pCommandBuffers[i]);
            if (cb_node) {
                skip |= ValidateCmdBufImageLayouts(dev_data, cb_node, dev_data->imageLayoutMap, localImageLayoutMap);
                current_cmds.push_back(submit->pCommandBuffers[i]);
                // The count of this CB within the batch drives the simultaneous-use check.
                skip |= ValidatePrimaryCommandBufferState(
                    dev_data, cb_node, (int)std::count(current_cmds.begin(), current_cmds.end(), submit->pCommandBuffers[i]),
                    &qfo_image_scoreboards, &qfo_buffer_scoreboards);
                skip |= ValidateQueueFamilyIndices(dev_data, cb_node, queue);
                // Potential early exit here as bad object state may crash in delayed function calls
                if (skip) {
                    return true;
                }
                // Call submit-time functions to validate/update state
                for (auto &function : cb_node->queue_submit_functions) {
                    skip |= function();
                }
                for (auto &function : cb_node->eventUpdates) {
                    skip |= function(queue);
                }
                for (auto &function : cb_node->queryUpdates) {
                    skip |= function(queue);
                }
            }
        }
    }
    return skip;
}
// Intercepted vkQueueSubmit: validate under the global lock, dispatch to the driver
// unlocked, then record state under the lock again.
VKAPI_ATTR VkResult VKAPI_CALL QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    bool skip = false;
    {
        unique_lock_t lock(global_lock);
        skip = PreCallValidateQueueSubmit(dev_data, queue, submitCount, pSubmits, fence);
    }
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    const VkResult result = dev_data->dispatch_table.QueueSubmit(queue, submitCount, pSubmits, fence);
    {
        unique_lock_t lock(global_lock);
        PostCallRecordQueueSubmit(dev_data, queue, submitCount, pSubmits, fence);
    }
    return result;
}
// Flag an error when the number of live memory objects has reached the device limit
// maxMemoryAllocationCount. Returns true if the allocation call should be skipped.
static bool PreCallValidateAllocateMemory(layer_data *dev_data) {
    const auto max_allocations = dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount;
    if (dev_data->memObjMap.size() < max_allocations) {
        return false;
    }
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                   HandleToUint64(dev_data->device), kVUIDUndefined,
                   "Number of currently valid memory objects is not less than the maximum allowed (%u).",
                   max_allocations);
}
// Begin tracking a freshly-allocated VkDeviceMemory object.
static void PostCallRecordAllocateMemory(layer_data *dev_data, const VkMemoryAllocateInfo *pAllocateInfo, VkDeviceMemory *pMemory) {
    AddMemObjInfo(dev_data, dev_data->device, *pMemory, pAllocateInfo);
}
// Intercepted vkAllocateMemory: enforce the allocation-count limit, dispatch
// unlocked, and record the new memory object on success.
VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
                                              const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    if (PreCallValidateAllocateMemory(dev_data)) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    lock.unlock();
    VkResult result = dev_data->dispatch_table.AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
    lock.lock();
    if (VK_SUCCESS == result) {
        PostCallRecordAllocateMemory(dev_data, pAllocateInfo, pMemory);
    }
    return result;
}
// For given obj node, if it is use, flag a validation error and return callback result, else return false
bool ValidateObjectNotInUse(const layer_data *dev_data, BASE_NODE *obj_node, VK_OBJECT obj_struct, const char *caller_name,
                            const std::string &error_code) {
    // Honor the layer setting that disables in-use checking entirely.
    if (dev_data->instance_data->disabled.object_in_use) {
        return false;
    }
    // A non-zero in-use count means a queued command buffer still references the object.
    if (!obj_node->in_use.load()) {
        return false;
    }
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[obj_struct.type], obj_struct.handle,
                   error_code, "Cannot call %s on %s 0x%" PRIx64 " that is currently in use by a command buffer.", caller_name,
                   object_string[obj_struct.type], obj_struct.handle);
}
// Validation phase of vkFreeMemory. Always fills the mem_info/obj_struct out-params
// (the Record phase uses them even when validation is disabled); flags an error only
// when the memory object is still in use. Returns true to skip the call.
static bool PreCallValidateFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO **mem_info, VK_OBJECT *obj_struct) {
    *mem_info = GetMemObjInfo(dev_data, mem);
    *obj_struct = {HandleToUint64(mem), kVulkanObjectTypeDeviceMemory};
    if (dev_data->instance_data->disabled.free_memory) return false;
    // Unknown handles are not flagged here; only freeing memory still in use is.
    if (nullptr == *mem_info) return false;
    return ValidateObjectNotInUse(dev_data, *mem_info, *obj_struct, "vkFreeMemory", "VUID-vkFreeMemory-memory-00677");
}
// Record-phase bookkeeping for vkFreeMemory: unbind every object still bound to this
// memory (warning about each dangling reference), invalidate command buffers that
// reference the memory, and drop the tracking entry from memObjMap.
static void PreCallRecordFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO *mem_info, VK_OBJECT obj_struct) {
    // Clear mem binding for any bound objects
    for (auto obj : mem_info->obj_bindings) {
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, get_debug_report_enum[obj.type], obj.handle,
                kVUID_Core_MemTrack_FreedMemRef, "VK Object 0x%" PRIx64 " still has a reference to mem obj 0x%" PRIx64,
                HandleToUint64(obj.handle), HandleToUint64(mem_info->mem));
        BINDABLE *bindable_state = nullptr;
        switch (obj.type) {
            case kVulkanObjectTypeImage:
                bindable_state = GetImageState(dev_data, reinterpret_cast<VkImage &>(obj.handle));
                break;
            case kVulkanObjectTypeBuffer:
                bindable_state = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
                break;
            default:
                // Should only have buffer or image objects bound to memory
                assert(0);
        }
        assert(bindable_state);
        // Guard the dereference: with assertions compiled out (NDEBUG), an unexpected
        // binding type or a state lookup that fails would otherwise null-deref here.
        if (bindable_state) {
            bindable_state->binding.mem = MEMORY_UNBOUND;
            bindable_state->UpdateBoundMemorySet();
        }
    }
    // Any bound cmd buffers are now invalid
    InvalidateCommandBuffers(dev_data, mem_info->cb_bindings, obj_struct);
    dev_data->memObjMap.erase(mem);
}
// Intercepted vkFreeMemory: validate, record the state change before dispatching
// (avoids a race with a concurrent allocation reusing the handle), then dispatch.
VKAPI_ATTR void VKAPI_CALL FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    DEVICE_MEM_INFO *mem_info = nullptr;
    VK_OBJECT obj_struct;
    unique_lock_t lock(global_lock);
    const bool skip = PreCallValidateFreeMemory(dev_data, mem, &mem_info, &obj_struct);
    if (skip) {
        return;
    }
    if (VK_NULL_HANDLE != mem) {
        // Avoid free/alloc race by recording state change before dispatching
        PreCallRecordFreeMemory(dev_data, mem, mem_info, obj_struct);
    }
    lock.unlock();
    dev_data->dispatch_table.FreeMemory(device, mem, pAllocator);
}
// Validate that given Map memory range is valid. This means that the memory should not already be mapped,
// and that the size of the map range should be:
// 1. Not zero
// 2. Within the size of the memory allocation
static bool ValidateMapMemRange(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
    bool skip = false;
    if (size == 0) {
        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                       HandleToUint64(mem), kVUID_Core_MemTrack_InvalidMap,
                       "VkMapMemory: Attempting to map memory range of size zero");
    }
    auto mem_element = dev_data->memObjMap.find(mem);
    if (mem_element != dev_data->memObjMap.end()) {
        auto mem_info = mem_element->second.get();
        // It is an application error to call VkMapMemory on an object that is already mapped
        // (a non-zero tracked range size means a previous map is still outstanding).
        if (mem_info->mem_range.size != 0) {
            skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           HandleToUint64(mem), kVUID_Core_MemTrack_InvalidMap,
                           "VkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIx64, HandleToUint64(mem));
        }
        // Validate that offset + size is within object's allocationSize
        if (size == VK_WHOLE_SIZE) {
            // VK_WHOLE_SIZE maps from offset to the end, so only the offset can overstep.
            if (offset >= mem_info->alloc_info.allocationSize) {
                skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                               HandleToUint64(mem), kVUID_Core_MemTrack_InvalidMap,
                               "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64
                               " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64,
                               offset, mem_info->alloc_info.allocationSize, mem_info->alloc_info.allocationSize);
            }
        } else {
            if ((offset + size) > mem_info->alloc_info.allocationSize) {
                skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                               HandleToUint64(mem), "VUID-vkMapMemory-size-00681",
                               "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64 ".",
                               offset, size + offset, mem_info->alloc_info.allocationSize);
            }
        }
    }
    return skip;
}
// Remember the currently-mapped range of 'mem' so later unmap/flush calls can be
// validated against it. Unknown handles are silently ignored.
static void StoreMemRanges(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
    auto mem_info = GetMemObjInfo(dev_data, mem);
    if (!mem_info) return;
    mem_info->mem_range.offset = offset;
    mem_info->mem_range.size = size;
}
// Guard value for pad data
// The pad regions around non-coherent shadow copies are filled with this byte so
// that out-of-bounds writes by the app can be detected at flush/unmap time.
static char NoncoherentMemoryFillValue = 0xb;
// Post-map bookkeeping for vkMapMemory. Records the driver-returned pointer; for
// non-host-coherent memory, allocates an aligned shadow copy surrounded by guard
// bands (filled with NoncoherentMemoryFillValue) and redirects *ppData into it so
// that out-of-bounds writes can be detected later.
static void InitializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
                                     void **ppData) {
    auto mem_info = GetMemObjInfo(dev_data, mem);
    if (mem_info) {
        // Keep the real driver mapping so flush/unmap can copy the shadow back.
        mem_info->p_driver_data = *ppData;
        uint32_t index = mem_info->alloc_info.memoryTypeIndex;
        if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
            // Coherent memory needs no shadow copy.
            mem_info->shadow_copy = 0;
        } else {
            if (size == VK_WHOLE_SIZE) {
                size = mem_info->alloc_info.allocationSize - offset;
            }
            mem_info->shadow_pad_size = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
            assert(SafeModulo(mem_info->shadow_pad_size, dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment) ==
                   0);
            // Ensure start of mapped region reflects hardware alignment constraints
            uint64_t map_alignment = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;

            // From spec: (ppData - offset) must be aligned to at least limits::minMemoryMapAlignment.
            uint64_t start_offset = offset % map_alignment;
            // Data passed to driver will be wrapped by a guardband of data to detect over- or under-writes.
            // NOTE(review): malloc result is not checked; a failed allocation would crash in memset below.
            mem_info->shadow_copy_base =
                malloc(static_cast<size_t>(2 * mem_info->shadow_pad_size + size + map_alignment + start_offset));

            // Round the base up to map_alignment, then add start_offset so the shadow
            // pointer keeps the same misalignment (relative to the alignment) as 'offset'.
            mem_info->shadow_copy =
                reinterpret_cast<char *>((reinterpret_cast<uintptr_t>(mem_info->shadow_copy_base) + map_alignment) &
                                         ~(map_alignment - 1)) +
                start_offset;
            assert(SafeModulo(reinterpret_cast<uintptr_t>(mem_info->shadow_copy) + mem_info->shadow_pad_size - start_offset,
                              map_alignment) == 0);
            // Fill both guard bands (and initially the payload) with the sentinel byte.
            memset(mem_info->shadow_copy, NoncoherentMemoryFillValue, static_cast<size_t>(2 * mem_info->shadow_pad_size + size));
            // Hand the app a pointer just past the leading guard band.
            *ppData = static_cast<char *>(mem_info->shadow_copy) + mem_info->shadow_pad_size;
        }
    }
}
// Verify that state for fence being waited on is appropriate. That is,
// a fence being waited on should not already be signaled and
// it should have been submitted on a queue or during acquire next image
static inline bool VerifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
    auto pFence = GetFenceNode(dev_data, fence);
    const bool never_submitted =
        pFence && (pFence->scope == kSyncScopeInternal) && (pFence->state == FENCE_UNSIGNALED);
    if (!never_submitted) {
        return false;
    }
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                   HandleToUint64(fence), kVUID_Core_MemTrack_FenceState,
                   "%s called for fence 0x%" PRIx64 " which has not been submitted on a Queue or during acquire next image.",
                   apiCall, HandleToUint64(fence));
}
// Mark an internally-scoped fence as complete, retiring the signaling queue's work
// up to the fence's sequence number when the signaler is a queue.
static void RetireFence(layer_data *dev_data, VkFence fence) {
    auto pFence = GetFenceNode(dev_data, fence);
    if (!pFence || (pFence->scope != kSyncScopeInternal)) {
        return;
    }
    if (VK_NULL_HANDLE == pFence->signaler.first) {
        // Fence signaller is the WSI. We're not tracking what the WSI op actually /was/ in CV yet, but we need to mark
        // the fence as retired.
        pFence->state = FENCE_RETIRED;
    } else {
        // Fence signaller is a queue -- use this as proof that prior operations on that queue have completed.
        RetireWorkOnQueue(dev_data, GetQueueState(dev_data, pFence->signaler.first), pFence->signaler.second);
    }
}
// Validation phase of vkWaitForFences: each awaited fence must have been submitted,
// and retiring it must account for all preceding queue work. Returns true to skip.
static bool PreCallValidateWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences) {
    if (dev_data->instance_data->disabled.wait_for_fences) return false;
    bool skip = false;
    for (uint32_t i = 0; i < fence_count; ++i) {
        const VkFence current_fence = fences[i];
        skip |= VerifyWaitFenceState(dev_data, current_fence, "vkWaitForFences");
        skip |= VerifyQueueStateToFence(dev_data, current_fence);
    }
    return skip;
}
static void PostCallRecordWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences, VkBool32 wait_all) {
    // When we know that all fences are complete we can clean/remove their CBs
    const bool all_complete = (VK_TRUE == wait_all) || (1 == fence_count);
    if (!all_complete) {
        // NOTE : Alternate case not handled here is when some fences have completed. In
        // this case for app to guarantee which fences completed it will have to call
        // vkGetFenceStatus() at which point we'll clean/remove their CBs if complete.
        return;
    }
    for (uint32_t i = 0; i < fence_count; ++i) {
        RetireFence(dev_data, fences[i]);
    }
}
// Intercepted vkWaitForFences: validate under the lock, dispatch unlocked, and on
// success retire the work guarded by the completed fences.
VKAPI_ATTR VkResult VKAPI_CALL WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll,
                                             uint64_t timeout) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // Verify fence status of submitted fences
    bool skip = false;
    {
        unique_lock_t lock(global_lock);
        skip = PreCallValidateWaitForFences(dev_data, fenceCount, pFences);
    }
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    const VkResult result = dev_data->dispatch_table.WaitForFences(device, fenceCount, pFences, waitAll, timeout);
    if (result == VK_SUCCESS) {
        unique_lock_t lock(global_lock);
        PostCallRecordWaitForFences(dev_data, fenceCount, pFences, waitAll);
    }
    return result;
}
// Validation phase of vkGetFenceStatus: warn when querying a fence that was never submitted.
static bool PreCallValidateGetFenceStatus(layer_data *dev_data, VkFence fence) {
    // Honor the layer setting that disables fence-state validation.
    if (dev_data->instance_data->disabled.get_fence_state) {
        return false;
    }
    return VerifyWaitFenceState(dev_data, fence, "vkGetFenceStatus");
}
// A successful status query proves the fence signaled, so retire its associated work.
static void PostCallRecordGetFenceStatus(layer_data *dev_data, VkFence fence) {
    RetireFence(dev_data, fence);
}
// Intercepted vkGetFenceStatus: validate under the lock, dispatch unlocked, and on
// VK_SUCCESS (fence signaled) retire the fence's work.
VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    {
        unique_lock_t lock(global_lock);
        skip = PreCallValidateGetFenceStatus(dev_data, fence);
    }
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    const VkResult result = dev_data->dispatch_table.GetFenceStatus(device, fence);
    if (result == VK_SUCCESS) {
        unique_lock_t lock(global_lock);
        PostCallRecordGetFenceStatus(dev_data, fence);
    }
    return result;
}
// Start tracking a queue handle returned by the driver, initializing its state the
// first time it is seen. Re-querying an already-tracked queue is a no-op.
static void PostCallRecordGetDeviceQueue(layer_data *dev_data, uint32_t q_family_index, VkQueue queue) {
    // Add queue to tracking set only if it is new
    const bool newly_added = dev_data->queues.emplace(queue).second;
    if (newly_added) {
        QUEUE_STATE &queue_state = dev_data->queueMap[queue];
        queue_state.queue = queue;
        queue_state.queueFamilyIndex = q_family_index;
        queue_state.seq = 0;
    }
}
// Intercept for vkGetDeviceQueue: fetch the queue from the driver, then record
// it (and its family index) in the layer's queue tracking under global_lock.
VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
    lock_guard_t lock(global_lock);
    PostCallRecordGetDeviceQueue(dev_data, queueFamilyIndex, *pQueue);
}
// Intercept for vkGetDeviceQueue2.
// Fix: pQueueInfo is declared const to match the Vulkan 1.1 prototype
// (PFN_vkGetDeviceQueue2); the layer only reads it. A VK_NULL_HANDLE result is
// legal and is not tracked.
VKAPI_ATTR void VKAPI_CALL GetDeviceQueue2(VkDevice device, const VkDeviceQueueInfo2 *pQueueInfo, VkQueue *pQueue) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetDeviceQueue2(device, pQueueInfo, pQueue);
    lock_guard_t lock(global_lock);
    if (*pQueue != VK_NULL_HANDLE) {
        PostCallRecordGetDeviceQueue(dev_data, pQueueInfo->queueFamilyIndex, *pQueue);
    }
}
// Validation for vkQueueWaitIdle. Always resolves *queue_state for the caller;
// when the check is enabled, verifies every submission up to the end of the
// queue's current sequence.
static bool PreCallValidateQueueWaitIdle(layer_data *dev_data, VkQueue queue, QUEUE_STATE **queue_state) {
    *queue_state = GetQueueState(dev_data, queue);
    if (dev_data->instance_data->disabled.queue_wait_idle) return false;
    QUEUE_STATE *state = *queue_state;
    auto const target_seq = state->seq + state->submissions.size();
    return VerifyQueueStateToSeq(dev_data, state, target_seq);
}
// State update for a successful vkQueueWaitIdle: everything submitted to the
// queue has completed, so retire all of its outstanding submissions.
static void PostCallRecordQueueWaitIdle(layer_data *dev_data, QUEUE_STATE *queue_state) {
    auto const completed_seq = queue_state->seq + queue_state->submissions.size();
    RetireWorkOnQueue(dev_data, queue_state, completed_seq);
}
// Intercept for vkQueueWaitIdle: validate the queue's outstanding submissions,
// call down the chain (lock released — this may block), and on success retire
// all work on the queue.
VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    QUEUE_STATE *queue_state = nullptr;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateQueueWaitIdle(dev_data, queue, &queue_state);
    lock.unlock();
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.QueueWaitIdle(queue);
    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordQueueWaitIdle(dev_data, queue_state);
        lock.unlock();
    }
    return result;
}
// Validation for vkDeviceWaitIdle: unless the check is disabled, verify the
// outstanding submissions of every known queue.
static bool PreCallValidateDeviceWaitIdle(layer_data *dev_data) {
    if (dev_data->instance_data->disabled.device_wait_idle) return false;
    bool skip = false;
    for (auto &entry : dev_data->queueMap) {
        QUEUE_STATE &qs = entry.second;
        skip |= VerifyQueueStateToSeq(dev_data, &qs, qs.seq + qs.submissions.size());
    }
    return skip;
}
// State update for a successful vkDeviceWaitIdle: every queue is idle, so
// retire all outstanding submissions on every queue.
static void PostCallRecordDeviceWaitIdle(layer_data *dev_data) {
    for (auto &entry : dev_data->queueMap) {
        QUEUE_STATE &qs = entry.second;
        RetireWorkOnQueue(dev_data, &qs, qs.seq + qs.submissions.size());
    }
}
// Intercept for vkDeviceWaitIdle: validate outstanding submissions on all
// queues, call down the chain (lock released — this may block), and on success
// retire all work device-wide.
VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateDeviceWaitIdle(dev_data);
    lock.unlock();
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.DeviceWaitIdle(device);
    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordDeviceWaitIdle(dev_data);
        lock.unlock();
    }
    return result;
}
// Validation for vkDestroyFence. Always resolves *fence_node / *obj_struct for
// the caller; flags an error if an internally-scoped fence is still in flight.
static bool PreCallValidateDestroyFence(layer_data *dev_data, VkFence fence, FENCE_NODE **fence_node, VK_OBJECT *obj_struct) {
    *fence_node = GetFenceNode(dev_data, fence);
    *obj_struct = {HandleToUint64(fence), kVulkanObjectTypeFence};
    if (dev_data->instance_data->disabled.destroy_fence) return false;
    FENCE_NODE *node = *fence_node;
    if (!node) return false;
    const bool in_flight = (node->scope == kSyncScopeInternal) && (node->state == FENCE_INFLIGHT);
    if (!in_flight) return false;
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                   HandleToUint64(fence), "VUID-vkDestroyFence-fence-01120", "Fence 0x%" PRIx64 " is in use.",
                   HandleToUint64(fence));
}
// Pre-destroy state update for vkDestroyFence: drop the layer's tracking entry.
static void PreCallRecordDestroyFence(layer_data *dev_data, VkFence fence) {
    dev_data->fenceMap.erase(fence);
}
// Intercept for vkDestroyFence. State is removed *before* the down-chain
// destroy (pre-record) so a concurrent create cannot observe stale tracking
// for a recycled handle.
// Fix: guard the pre-record with a VK_NULL_HANDLE check, consistent with the
// other Destroy* intercepts in this file (destroying VK_NULL_HANDLE is a
// legal no-op and should not touch tracking state).
VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // Common data objects used pre & post call
    FENCE_NODE *fence_node = nullptr;
    VK_OBJECT obj_struct;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateDestroyFence(dev_data, fence, &fence_node, &obj_struct);
    if (!skip) {
        if (fence != VK_NULL_HANDLE) {
            // Pre-record to avoid Destroy/Create race
            PreCallRecordDestroyFence(dev_data, fence);
        }
        lock.unlock();
        dev_data->dispatch_table.DestroyFence(device, fence, pAllocator);
    }
}
// Validation for vkDestroySemaphore. Always resolves *sema_node / *obj_struct
// for the caller; flags an error if the semaphore is still in use.
static bool PreCallValidateDestroySemaphore(layer_data *dev_data, VkSemaphore semaphore, SEMAPHORE_NODE **sema_node,
                                            VK_OBJECT *obj_struct) {
    *sema_node = GetSemaphoreNode(dev_data, semaphore);
    *obj_struct = {HandleToUint64(semaphore), kVulkanObjectTypeSemaphore};
    if (dev_data->instance_data->disabled.destroy_semaphore) return false;
    if (!*sema_node) return false;
    return ValidateObjectNotInUse(dev_data, *sema_node, *obj_struct, "vkDestroySemaphore",
                                  "VUID-vkDestroySemaphore-semaphore-01137");
}
// Pre-destroy state update for vkDestroySemaphore: drop the tracking entry.
static void PreCallRecordDestroySemaphore(layer_data *dev_data, VkSemaphore sema) {
    dev_data->semaphoreMap.erase(sema);
}
// Intercept for vkDestroySemaphore. State is removed *before* the down-chain
// destroy (pre-record) so a concurrent create cannot observe stale tracking
// for a recycled handle.
// Fix: guard the pre-record with a VK_NULL_HANDLE check, consistent with the
// other Destroy* intercepts in this file (destroying VK_NULL_HANDLE is a
// legal no-op and should not touch tracking state).
VKAPI_ATTR void VKAPI_CALL DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    SEMAPHORE_NODE *sema_node;
    VK_OBJECT obj_struct;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateDestroySemaphore(dev_data, semaphore, &sema_node, &obj_struct);
    if (!skip) {
        if (semaphore != VK_NULL_HANDLE) {
            // Pre-record to avoid Destroy/Create race
            PreCallRecordDestroySemaphore(dev_data, semaphore);
        }
        lock.unlock();
        dev_data->dispatch_table.DestroySemaphore(device, semaphore, pAllocator);
    }
}
// Validation for vkDestroyEvent. Always resolves *event_state / *obj_struct
// for the caller; flags an error if the event is still in use.
static bool PreCallValidateDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE **event_state, VK_OBJECT *obj_struct) {
    *event_state = GetEventNode(dev_data, event);
    *obj_struct = {HandleToUint64(event), kVulkanObjectTypeEvent};
    if (dev_data->instance_data->disabled.destroy_event) return false;
    if (!*event_state) return false;
    return ValidateObjectNotInUse(dev_data, *event_state, *obj_struct, "vkDestroyEvent", "VUID-vkDestroyEvent-event-01145");
}
// Pre-destroy state update for vkDestroyEvent: invalidate command buffers that
// reference the event, then drop the layer's tracking entry.
static void PreCallRecordDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE *event_state, VK_OBJECT obj_struct) {
    InvalidateCommandBuffers(dev_data, event_state->cb_bindings, obj_struct);
    dev_data->eventMap.erase(event);
}
// Intercept for vkDestroyEvent. State removal happens *before* the down-chain
// destroy (pre-record) so a concurrent create cannot observe stale tracking
// for a recycled handle; VK_NULL_HANDLE is a legal no-op, hence the guard.
VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    EVENT_STATE *event_state = nullptr;
    VK_OBJECT obj_struct;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateDestroyEvent(dev_data, event, &event_state, &obj_struct);
    if (!skip) {
        if (event != VK_NULL_HANDLE) {
            // Pre-record to avoid Destroy/Create race
            PreCallRecordDestroyEvent(dev_data, event, event_state, obj_struct);
        }
        lock.unlock();
        dev_data->dispatch_table.DestroyEvent(device, event, pAllocator);
    }
}
// Validation for vkDestroyQueryPool. Always resolves *qp_state / *obj_struct
// for the caller; flags an error if the pool is still in use.
static bool PreCallValidateDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE **qp_state,
                                            VK_OBJECT *obj_struct) {
    *qp_state = GetQueryPoolNode(dev_data, query_pool);
    *obj_struct = {HandleToUint64(query_pool), kVulkanObjectTypeQueryPool};
    if (dev_data->instance_data->disabled.destroy_query_pool) return false;
    if (!*qp_state) return false;
    return ValidateObjectNotInUse(dev_data, *qp_state, *obj_struct, "vkDestroyQueryPool",
                                  "VUID-vkDestroyQueryPool-queryPool-00793");
}
// Pre-destroy state update for vkDestroyQueryPool: invalidate command buffers
// that reference the pool, then drop the layer's tracking entry.
static void PreCallRecordDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE *qp_state,
                                          VK_OBJECT obj_struct) {
    InvalidateCommandBuffers(dev_data, qp_state->cb_bindings, obj_struct);
    dev_data->queryPoolMap.erase(query_pool);
}
// Intercept for vkDestroyQueryPool. State removal happens *before* the
// down-chain destroy (pre-record) so a concurrent create cannot observe stale
// tracking for a recycled handle; VK_NULL_HANDLE is a legal no-op.
VKAPI_ATTR void VKAPI_CALL DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    QUERY_POOL_NODE *qp_state = nullptr;
    VK_OBJECT obj_struct;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateDestroyQueryPool(dev_data, queryPool, &qp_state, &obj_struct);
    if (!skip) {
        if (queryPool != VK_NULL_HANDLE) {
            // Pre-record to avoid Destroy/Create race
            PreCallRecordDestroyQueryPool(dev_data, queryPool, qp_state, obj_struct);
        }
        lock.unlock();
        dev_data->dispatch_table.DestroyQueryPool(device, queryPool, pAllocator);
    }
}
// Validation for vkGetQueryPoolResults.
// Flags use of VK_QUERY_RESULT_PARTIAL_BIT against a timestamp pool, and fills
// *queries_in_flight with, for every query recorded in any in-use command
// buffer, the list of command buffers referencing it (consumed later by
// PostCallRecordGetQueryPoolResults).
// NOTE: first_query and query_count are currently unused here; the in-flight
// scan covers all queries, not just the requested range.
static bool PreCallValidateGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
                                               uint32_t query_count, VkQueryResultFlags flags,
                                               unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
    bool skip = false;
    auto query_pool_state = dev_data->queryPoolMap.find(query_pool);
    if (query_pool_state != dev_data->queryPoolMap.end()) {
        // PARTIAL_BIT is invalid for timestamp pools ("...-queryType-00818").
        if ((query_pool_state->second.createInfo.queryType == VK_QUERY_TYPE_TIMESTAMP) && (flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
                        "VUID-vkGetQueryPoolResults-queryType-00818",
                        "QueryPool 0x%" PRIx64
                        " was created with a queryType of VK_QUERY_TYPE_TIMESTAMP but flags contains VK_QUERY_RESULT_PARTIAL_BIT.",
                        HandleToUint64(query_pool));
        }
    }
    // TODO: clean this up, it's insanely wasteful.
    for (auto cmd_buffer : dev_data->commandBufferMap) {
        if (cmd_buffer.second->in_use.load()) {
            for (auto query_state_pair : cmd_buffer.second->queryToStateMap) {
                (*queries_in_flight)[query_state_pair.first].push_back(cmd_buffer.first);
            }
        }
    }
    return skip;
}
// State update for vkGetQueryPoolResults.
// For each query in [first_query, first_query + query_count) whose recorded
// state is true and which is referenced by an in-flight command buffer, mark
// every event that was waited on before the query reset as needsSignaled.
// Fix: the original tested queryToStateMap.end() twice in nested conditions;
// the redundant test is removed and the nesting flattened (behavior unchanged).
static void PostCallRecordGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
                                              uint32_t query_count,
                                              unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
    for (uint32_t i = 0; i < query_count; ++i) {
        QueryObject query = {query_pool, first_query + i};
        auto query_state_pair = dev_data->queryToStateMap.find(query);
        // Query must be available (state true) ...
        if (query_state_pair == dev_data->queryToStateMap.end() || !query_state_pair->second) continue;
        // ... and referenced by at least one in-flight command buffer.
        auto qif_pair = queries_in_flight->find(query);
        if (qif_pair == queries_in_flight->end()) continue;
        for (auto cmd_buffer : qif_pair->second) {
            auto cb = GetCBNode(dev_data, cmd_buffer);
            auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
            if (query_event_pair == cb->waitedEventsBeforeQueryReset.end()) continue;
            for (auto event : query_event_pair->second) {
                dev_data->eventMap[event].needsSignaled = true;
            }
        }
    }
}
// Intercept for vkGetQueryPoolResults: validate (collecting in-flight query
// usage as a side effect), call down the chain, then update query/event state.
// Note the post-record step runs regardless of the down-chain result.
VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
                                                   size_t dataSize, void *pData, VkDeviceSize stride, VkQueryResultFlags flags) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unordered_map<QueryObject, vector<VkCommandBuffer>> queries_in_flight;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, flags, &queries_in_flight);
    lock.unlock();
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result =
        dev_data->dispatch_table.GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags);
    lock.lock();
    PostCallRecordGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, &queries_in_flight);
    lock.unlock();
    return result;
}
// Return true if given ranges intersect, else false
// Prereq : For both ranges, range->end - range->start > 0. This case should have already resulted
//  in an error so not checking that here
// pad_ranges bool indicates a linear and non-linear comparison which requires padding
// In the case where padding is required, if an alias is encountered then a validation error is reported and skip
//  may be set by the callback function so caller should merge in skip value if padding case is possible.
// This check can be skipped by passing skip_checks=true, for call sites outside the validation path.
static bool RangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, MEMORY_RANGE const *range2, bool *skip,
                            bool skip_checks) {
    *skip = false;
    auto r1_start = range1->start;
    auto r1_end = range1->end;
    auto r2_start = range2->start;
    auto r2_end = range2->end;
    VkDeviceSize pad_align = 1;
    // Linear vs. non-linear resources must honor bufferImageGranularity, so
    // widen the comparison to that granularity in the mixed case.
    if (range1->linear != range2->linear) {
        pad_align = dev_data->phys_dev_properties.properties.limits.bufferImageGranularity;
    }
    // Masking with ~(pad_align - 1) rounds each offset down to its pad_align
    // bucket; the ranges are disjoint only if their buckets are disjoint.
    if ((r1_end & ~(pad_align - 1)) < (r2_start & ~(pad_align - 1))) return false;
    if ((r1_start & ~(pad_align - 1)) > (r2_end & ~(pad_align - 1))) return false;
    if (!skip_checks && (range1->linear != range2->linear)) {
        // In linear vs. non-linear case, warn of aliasing
        const char *r1_linear_str = range1->linear ? "Linear" : "Non-linear";
        const char *r1_type_str = range1->image ? "image" : "buffer";
        const char *r2_linear_str = range2->linear ? "linear" : "non-linear";
        const char *r2_type_str = range2->image ? "image" : "buffer";
        auto obj_type = range1->image ? VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT : VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT;
        *skip |= log_msg(
            dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, obj_type, range1->handle, kVUID_Core_MemTrack_InvalidAliasing,
            "%s %s 0x%" PRIx64 " is aliased with %s %s 0x%" PRIx64
            " which may indicate a bug. For further info refer to the Buffer-Image Granularity section of the Vulkan "
            "specification. "
            "(https://www.khronos.org/registry/vulkan/specs/1.0-extensions/xhtml/vkspec.html#resources-bufferimagegranularity)",
            r1_linear_str, r1_type_str, range1->handle, r2_linear_str, r2_type_str, range2->handle);
    }
    // Ranges intersect
    return true;
}
// Convenience overload: test range1 against a raw [offset, end] span.
// The probe inherits range1's linearity so the padding/aliasing path in the
// full overload can never trigger, and checks are disabled via skip_checks.
bool RangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end) {
    MEMORY_RANGE probe;
    probe.linear = range1->linear;  // match linearity to sidestep granularity padding
    probe.start = offset;
    probe.end = end;
    bool unused_skip;
    return RangesIntersect(dev_data, range1, &probe, &unused_skip, true);
}
// Validation counterpart of InsertMemoryRange: build the candidate
// MEMORY_RANGE for {handle, memoryOffset, memRequirements}, check it against
// every range already bound to mem_info for linear/non-linear aliasing, then
// check that memoryOffset lies inside the allocation.
// Returns true if a validation callback requested a skip.
static bool ValidateInsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info,
                                      VkDeviceSize memoryOffset, VkMemoryRequirements memRequirements, bool is_image,
                                      bool is_linear, const char *api_name) {
    bool skip = false;
    MEMORY_RANGE range;
    range.image = is_image;
    range.handle = handle;
    range.linear = is_linear;
    range.memory = mem_info->mem;
    range.start = memoryOffset;
    range.size = memRequirements.size;
    range.end = memoryOffset + memRequirements.size - 1;
    range.aliases.clear();
    // Check for aliasing problems.
    for (auto &obj_range_pair : mem_info->bound_ranges) {
        auto check_range = &obj_range_pair.second;
        bool intersection_error = false;
        if (RangesIntersect(dev_data, &range, check_range, &intersection_error, false)) {
            skip |= intersection_error;
            range.aliases.insert(check_range);
        }
    }
    if (memoryOffset >= mem_info->alloc_info.allocationSize) {
        std::string error_code =
            is_image ? "VUID-vkBindImageMemory-memoryOffset-01046" : "VUID-vkBindBufferMemory-memoryOffset-01031";
        // Bug fix: accumulate with |= — plain assignment here clobbered any
        // skip already set by the aliasing loop above.
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        HandleToUint64(mem_info->mem), error_code,
                        "In %s, attempting to bind memory (0x%" PRIx64 ") to object (0x%" PRIx64 "), memoryOffset=0x%" PRIxLEAST64
                        " must be less than the memory allocation size 0x%" PRIxLEAST64 ".",
                        api_name, HandleToUint64(mem_info->mem), HandleToUint64(handle), memoryOffset,
                        mem_info->alloc_info.allocationSize);
    }
    return skip;
}
// Object with given handle is being bound to memory w/ given mem_info struct.
// Track the newly bound memory range with given memoryOffset.
// Also scan any previous ranges and cross-link aliased ranges with the new one.
// NOTE: unlike ValidateInsertMemoryRange this is record-only (returns void);
// RangesIntersect is called with skip_checks=true so no validation message can
// be emitted from this path.
// is_image indicates an image object, otherwise handle is for a buffer
// is_linear indicates a buffer or linear image
static void InsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info, VkDeviceSize memoryOffset,
                              VkMemoryRequirements memRequirements, bool is_image, bool is_linear) {
    MEMORY_RANGE range;
    range.image = is_image;
    range.handle = handle;
    range.linear = is_linear;
    range.memory = mem_info->mem;
    range.start = memoryOffset;
    range.size = memRequirements.size;
    range.end = memoryOffset + memRequirements.size - 1;
    range.aliases.clear();
    // Update Memory aliasing
    // Save aliased ranges so we can copy into final map entry below. Can't do it in loop b/c we don't yet have final ptr. If we
    // inserted into map before loop to get the final ptr, then we may enter loop when not needed & we check range against itself
    std::unordered_set<MEMORY_RANGE *> tmp_alias_ranges;
    for (auto &obj_range_pair : mem_info->bound_ranges) {
        auto check_range = &obj_range_pair.second;
        bool intersection_error = false;
        if (RangesIntersect(dev_data, &range, check_range, &intersection_error, true)) {
            range.aliases.insert(check_range);
            tmp_alias_ranges.insert(check_range);
        }
    }
    mem_info->bound_ranges[handle] = std::move(range);
    // Back-link: every pre-existing range that aliases the new one gets a
    // pointer to the final map entry (stable now that insertion is done).
    for (auto tmp_range : tmp_alias_ranges) {
        tmp_range->aliases.insert(&mem_info->bound_ranges[handle]);
    }
    if (is_image)
        mem_info->bound_images.insert(handle);
    else
        mem_info->bound_buffers.insert(handle);
}
// Image flavor of ValidateInsertMemoryRange (is_image = true).
static bool ValidateInsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info,
                                           VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, bool is_linear,
                                           const char *api_name) {
    return ValidateInsertMemoryRange(dev_data, HandleToUint64(image), mem_info, mem_offset, mem_reqs, true, is_linear, api_name);
}
// Image flavor of InsertMemoryRange (is_image = true).
static void InsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
                                   VkMemoryRequirements mem_reqs, bool is_linear) {
    InsertMemoryRange(dev_data, HandleToUint64(image), mem_info, mem_offset, mem_reqs, true, is_linear);
}
// Buffer flavor of ValidateInsertMemoryRange (is_image = false; buffers are
// always treated as linear).
static bool ValidateInsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info,
                                            VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, const char *api_name) {
    return ValidateInsertMemoryRange(dev_data, HandleToUint64(buffer), mem_info, mem_offset, mem_reqs, false, true, api_name);
}
// Buffer flavor of InsertMemoryRange (is_image = false; buffers are always
// treated as linear).
static void InsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
                                    VkMemoryRequirements mem_reqs) {
    InsertMemoryRange(dev_data, HandleToUint64(buffer), mem_info, mem_offset, mem_reqs, false, true);
}
// Remove MEMORY_RANGE struct for given handle from bound_ranges of mem_info
// is_image indicates if handle is for image or buffer
// This function will also remove the handle-to-index mapping from the appropriate
// map and clean up any aliases for range being removed.
static void RemoveMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info, bool is_image) {
    auto erase_range = &mem_info->bound_ranges[handle];
    // Unlink this range from every range that recorded it as an alias.
    for (auto alias_range : erase_range->aliases) {
        alias_range->aliases.erase(erase_range);
    }
    erase_range->aliases.clear();
    mem_info->bound_ranges.erase(handle);
    if (is_image) {
        mem_info->bound_images.erase(handle);
    } else {
        mem_info->bound_buffers.erase(handle);
    }
}
// Buffer flavor of RemoveMemoryRange.
void RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, false); }
// Image flavor of RemoveMemoryRange.
void RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, true); }
// Intercept for vkDestroyBuffer. State removal happens *before* the down-chain
// destroy (pre-record) so a concurrent create cannot observe stale tracking
// for a recycled handle; VK_NULL_HANDLE is a legal no-op, hence the guard.
VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    BUFFER_STATE *buffer_state = nullptr;
    VK_OBJECT obj_struct;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateDestroyBuffer(dev_data, buffer, &buffer_state, &obj_struct);
    if (!skip) {
        if (buffer != VK_NULL_HANDLE) {
            // Pre-record to avoid Destroy/Create race
            PreCallRecordDestroyBuffer(dev_data, buffer, buffer_state, obj_struct);
        }
        lock.unlock();
        dev_data->dispatch_table.DestroyBuffer(device, buffer, pAllocator);
    }
}
// Intercept for vkDestroyBufferView. State removal happens *before* the
// down-chain destroy (pre-record) so a concurrent create cannot observe stale
// tracking for a recycled handle; VK_NULL_HANDLE is a legal no-op.
VKAPI_ATTR void VKAPI_CALL DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // Common data objects used pre & post call
    BUFFER_VIEW_STATE *buffer_view_state = nullptr;
    VK_OBJECT obj_struct;
    unique_lock_t lock(global_lock);
    // Validate state before calling down chain, update common data if we'll be calling down chain
    bool skip = PreCallValidateDestroyBufferView(dev_data, bufferView, &buffer_view_state, &obj_struct);
    if (!skip) {
        if (bufferView != VK_NULL_HANDLE) {
            // Pre-record to avoid Destroy/Create race
            PreCallRecordDestroyBufferView(dev_data, bufferView, buffer_view_state, obj_struct);
        }
        lock.unlock();
        dev_data->dispatch_table.DestroyBufferView(device, bufferView, pAllocator);
    }
}
// Intercept for vkDestroyImage. State removal happens *before* the down-chain
// destroy (pre-record) so a concurrent create cannot observe stale tracking
// for a recycled handle; VK_NULL_HANDLE is a legal no-op, hence the guard.
VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    IMAGE_STATE *image_state = nullptr;
    VK_OBJECT obj_struct;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateDestroyImage(dev_data, image, &image_state, &obj_struct);
    if (!skip) {
        if (image != VK_NULL_HANDLE) {
            // Pre-record to avoid Destroy/Create race
            PreCallRecordDestroyImage(dev_data, image, image_state, obj_struct);
        }
        lock.unlock();
        dev_data->dispatch_table.DestroyImage(device, image, pAllocator);
    }
}
static bool ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
const char *funcName, std::string msgCode) {
bool skip = false;
if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem_info->mem), msgCode,
"%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
"type (0x%X) of this memory object 0x%" PRIx64 ".",
funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex, HandleToUint64(mem_info->mem));
}
return skip;
}
// Validation for vkBindBufferMemory / vkBindBufferMemory2 (one bind at a time).
// Checks the binding itself, bound-range aliasing, memory-type compatibility,
// offset alignment, remaining allocation size, and dedicated-allocation rules.
// If the app never called vkGetBufferMemoryRequirements(), warns and performs
// the query itself so the remaining checks operate on real requirements.
// api_name identifies the calling entry point in emitted messages.
// Fixes: (1) buffer_handle is already a uint64_t — the redundant
// HandleToUint64(buffer_handle) in the warning is removed; (2) PRIXLEAST64 in
// the dedicated-allocation message is lowercased to PRIxLEAST64 for consistent
// hex formatting with every other message in this file.
static bool PreCallValidateBindBufferMemory(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE *buffer_state, VkDeviceMemory mem,
                                            VkDeviceSize memoryOffset, const char *api_name) {
    bool skip = false;
    if (buffer_state) {
        unique_lock_t lock(global_lock);
        // Track objects tied to memory
        uint64_t buffer_handle = HandleToUint64(buffer);
        skip = ValidateSetMemBinding(dev_data, mem, buffer_handle, kVulkanObjectTypeBuffer, api_name);
        if (!buffer_state->memory_requirements_checked) {
            // There's not an explicit requirement in the spec to call vkGetBufferMemoryRequirements() prior to calling
            // BindBufferMemory, but it's implied in that memory being bound must conform with VkMemoryRequirements from
            // vkGetBufferMemoryRequirements()
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                            buffer_handle, kVUID_Core_DrawState_InvalidBuffer,
                            "%s: Binding memory to buffer 0x%" PRIx64
                            " but vkGetBufferMemoryRequirements() has not been called on that buffer.",
                            api_name, buffer_handle);
            // Make the call for them so we can verify the state
            lock.unlock();
            dev_data->dispatch_table.GetBufferMemoryRequirements(dev_data->device, buffer, &buffer_state->requirements);
            lock.lock();
        }
        // Validate bound memory range information
        const auto mem_info = GetMemObjInfo(dev_data, mem);
        if (mem_info) {
            skip |= ValidateInsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, buffer_state->requirements, api_name);
            skip |= ValidateMemoryTypes(dev_data, mem_info, buffer_state->requirements.memoryTypeBits, api_name,
                                        "VUID-vkBindBufferMemory-memory-01035");
        }
        // Validate memory requirements alignment
        if (SafeModulo(memoryOffset, buffer_state->requirements.alignment) != 0) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                            buffer_handle, "VUID-vkBindBufferMemory-memoryOffset-01036",
                            "%s: memoryOffset is 0x%" PRIxLEAST64
                            " but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
                            ", returned from a call to vkGetBufferMemoryRequirements with buffer.",
                            api_name, memoryOffset, buffer_state->requirements.alignment);
        }
        if (mem_info) {
            // Validate memory requirements size
            if (buffer_state->requirements.size > (mem_info->alloc_info.allocationSize - memoryOffset)) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                buffer_handle, "VUID-vkBindBufferMemory-size-01037",
                                "%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
                                " but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
                                ", returned from a call to vkGetBufferMemoryRequirements with buffer.",
                                api_name, mem_info->alloc_info.allocationSize - memoryOffset, buffer_state->requirements.size);
            }
            // Validate dedicated allocation
            if (mem_info->is_dedicated && ((mem_info->dedicated_buffer != buffer) || (memoryOffset != 0))) {
                // TODO: Add vkBindBufferMemory2KHR error message when added to spec.
                auto validation_error = kVUIDUndefined;
                if (strcmp(api_name, "vkBindBufferMemory()") == 0) {
                    validation_error = "VUID-vkBindBufferMemory-memory-01508";
                }
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                            buffer_handle, validation_error,
                            "%s: for dedicated memory allocation 0x%" PRIxLEAST64
                            ", VkMemoryDedicatedAllocateInfoKHR::buffer 0x%" PRIxLEAST64 " must be equal to buffer 0x%" PRIxLEAST64
                            " and memoryOffset 0x%" PRIxLEAST64 " must be zero.",
                            api_name, HandleToUint64(mem), HandleToUint64(mem_info->dedicated_buffer), buffer_handle, memoryOffset);
            }
        }
    }
    return skip;
}
// State update for a successful buffer-memory bind: record the bound range on
// the memory object and point the buffer's binding at the memory.
static void PostCallRecordBindBufferMemory(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE *buffer_state, VkDeviceMemory mem,
                                           VkDeviceSize memoryOffset, const char *api_name) {
    if (!buffer_state) return;
    unique_lock_t lock(global_lock);
    // Track bound memory range information
    auto mem_info = GetMemObjInfo(dev_data, mem);
    if (mem_info) {
        InsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, buffer_state->requirements);
    }
    // Track objects tied to memory
    uint64_t handle = HandleToUint64(buffer);
    SetMemBinding(dev_data, mem, buffer_state, memoryOffset, handle, kVulkanObjectTypeBuffer, api_name);
}
// Intercept for vkBindBufferMemory: resolve buffer state, validate the bind,
// call down the chain, and on success record the binding. Validation failure
// short-circuits with VK_ERROR_VALIDATION_FAILED_EXT.
VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    BUFFER_STATE *buffer_state;
    {
        unique_lock_t lock(global_lock);
        buffer_state = GetBufferState(dev_data, buffer);
    }
    bool skip = PreCallValidateBindBufferMemory(dev_data, buffer, buffer_state, mem, memoryOffset, "vkBindBufferMemory()");
    if (!skip) {
        result = dev_data->dispatch_table.BindBufferMemory(device, buffer, mem, memoryOffset);
        if (result == VK_SUCCESS) {
            PostCallRecordBindBufferMemory(dev_data, buffer, buffer_state, mem, memoryOffset, "vkBindBufferMemory()");
        }
    }
    return result;
}
// Validation for vkBindBufferMemory2[KHR]: resolve every buffer's state under
// the lock, then run the single-bind validation for each pBindInfos entry,
// tagging each message with the entry's index.
// Robustness fix: snprintf replaces sprintf so the fixed-size api_name buffer
// can never overflow regardless of the formatted index width.
static bool PreCallValidateBindBufferMemory2(layer_data *dev_data, std::vector<BUFFER_STATE *> *buffer_state,
                                             uint32_t bindInfoCount, const VkBindBufferMemoryInfoKHR *pBindInfos) {
    {
        unique_lock_t lock(global_lock);
        for (uint32_t i = 0; i < bindInfoCount; i++) {
            (*buffer_state)[i] = GetBufferState(dev_data, pBindInfos[i].buffer);
        }
    }
    bool skip = false;
    char api_name[64];
    for (uint32_t i = 0; i < bindInfoCount; i++) {
        snprintf(api_name, sizeof(api_name), "vkBindBufferMemory2() pBindInfos[%u]", i);
        skip |= PreCallValidateBindBufferMemory(dev_data, pBindInfos[i].buffer, (*buffer_state)[i], pBindInfos[i].memory,
                                                pBindInfos[i].memoryOffset, api_name);
    }
    return skip;
}
// State update for vkBindBufferMemory2[KHR]: apply the single-bind record step
// to every entry of pBindInfos.
static void PostCallRecordBindBufferMemory2(layer_data *dev_data, const std::vector<BUFFER_STATE *> &buffer_state,
                                            uint32_t bindInfoCount, const VkBindBufferMemoryInfoKHR *pBindInfos) {
    for (uint32_t i = 0; i < bindInfoCount; i++) {
        const auto &bind_info = pBindInfos[i];
        PostCallRecordBindBufferMemory(dev_data, bind_info.buffer, buffer_state[i], bind_info.memory, bind_info.memoryOffset,
                                       "vkBindBufferMemory2()");
    }
}
// Intercept for vkBindBufferMemory2 (core promotion of the KHR entry point):
// validate all binds up front; only if the whole batch validates cleanly is it
// passed down the chain and, on success, recorded.
VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory2(VkDevice device, uint32_t bindInfoCount,
                                                 const VkBindBufferMemoryInfoKHR *pBindInfos) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::vector<BUFFER_STATE *> buffer_state(bindInfoCount);
    if (!PreCallValidateBindBufferMemory2(dev_data, &buffer_state, bindInfoCount, pBindInfos)) {
        result = dev_data->dispatch_table.BindBufferMemory2(device, bindInfoCount, pBindInfos);
        if (result == VK_SUCCESS) {
            PostCallRecordBindBufferMemory2(dev_data, buffer_state, bindInfoCount, pBindInfos);
        }
    }
    return result;
}
// Intercept for vkBindBufferMemory2KHR: identical flow to BindBufferMemory2
// above, but dispatches the KHR entry point down the chain.
VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount,
                                                    const VkBindBufferMemoryInfoKHR *pBindInfos) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::vector<BUFFER_STATE *> buffer_state(bindInfoCount);
    if (!PreCallValidateBindBufferMemory2(dev_data, &buffer_state, bindInfoCount, pBindInfos)) {
        result = dev_data->dispatch_table.BindBufferMemory2KHR(device, bindInfoCount, pBindInfos);
        if (result == VK_SUCCESS) {
            PostCallRecordBindBufferMemory2(dev_data, buffer_state, bindInfoCount, pBindInfos);
        }
    }
    return result;
}
// Cache the requirements reported for a buffer so bind-time validation can
// check offset/alignment/size later, and remember that the query was made.
static void PostCallRecordGetBufferMemoryRequirements(layer_data *dev_data, VkBuffer buffer,
                                                      VkMemoryRequirements *pMemoryRequirements) {
    BUFFER_STATE *state = nullptr;
    {
        unique_lock_t lock(global_lock);
        state = GetBufferState(dev_data, buffer);
    }
    if (!state) return;
    state->requirements = *pMemoryRequirements;
    state->memory_requirements_checked = true;
}
// Intercept for vkGetBufferMemoryRequirements: query the driver, then cache
// the result on the buffer's state for later bind-time validation.
VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer,
                                                       VkMemoryRequirements *pMemoryRequirements) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
    PostCallRecordGetBufferMemoryRequirements(dev_data, buffer, pMemoryRequirements);
}
// Intercept for vkGetBufferMemoryRequirements2: same caching, pulling the
// buffer handle and requirements out of the extensible structs.
VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements2(VkDevice device, const VkBufferMemoryRequirementsInfo2KHR *pInfo,
                                                        VkMemoryRequirements2KHR *pMemoryRequirements) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetBufferMemoryRequirements2(device, pInfo, pMemoryRequirements);
    PostCallRecordGetBufferMemoryRequirements(dev_data, pInfo->buffer, &pMemoryRequirements->memoryRequirements);
}
// Intercept for vkGetBufferMemoryRequirements2KHR: KHR alias of the above,
// dispatching the KHR entry point.
VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements2KHR(VkDevice device, const VkBufferMemoryRequirementsInfo2KHR *pInfo,
                                                           VkMemoryRequirements2KHR *pMemoryRequirements) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetBufferMemoryRequirements2KHR(device, pInfo, pMemoryRequirements);
    PostCallRecordGetBufferMemoryRequirements(dev_data, pInfo->buffer, &pMemoryRequirements->memoryRequirements);
}
// Record the driver-reported memory requirements on the image's state object so a
// later vkBindImageMemory call can be validated against them.
static void PostCallRecordGetImageMemoryRequirements(layer_data *dev_data, VkImage image,
                                                     VkMemoryRequirements *pMemoryRequirements) {
    IMAGE_STATE *state = nullptr;
    {
        // Hold the global lock only for the map lookup.
        unique_lock_t guard(global_lock);
        state = GetImageState(dev_data, image);
    }
    if (nullptr == state) return;
    state->requirements = *pMemoryRequirements;
    state->memory_requirements_checked = true;
}
VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
    // Pass through to the driver, then record what it reported.
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    device_data->dispatch_table.GetImageMemoryRequirements(device, image, pMemoryRequirements);
    PostCallRecordGetImageMemoryRequirements(device_data, image, pMemoryRequirements);
}
VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2KHR *pInfo,
                                                       VkMemoryRequirements2KHR *pMemoryRequirements) {
    // Unwrap the *2 structures and share the core recording path.
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    device_data->dispatch_table.GetImageMemoryRequirements2(device, pInfo, pMemoryRequirements);
    PostCallRecordGetImageMemoryRequirements(device_data, pInfo->image, &pMemoryRequirements->memoryRequirements);
}
VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2KHR *pInfo,
                                                          VkMemoryRequirements2KHR *pMemoryRequirements) {
    // KHR alias of GetImageMemoryRequirements2; identical recording path.
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    device_data->dispatch_table.GetImageMemoryRequirements2KHR(device, pInfo, pMemoryRequirements);
    PostCallRecordGetImageMemoryRequirements(device_data, pInfo->image, &pMemoryRequirements->memoryRequirements);
}
// Record the sparse memory requirements reported for an image and note whether any
// of them carry the metadata aspect (used by later sparse-binding validation).
static void PostCallRecordGetImageSparseMemoryRequirements(IMAGE_STATE *image_state, uint32_t req_count,
                                                           VkSparseImageMemoryRequirements *reqs) {
    image_state->get_sparse_reqs_called = true;
    image_state->sparse_requirements.resize(req_count);
    if (reqs) {
        for (uint32_t i = 0; i < req_count; ++i) {
            image_state->sparse_requirements[i] = reqs[i];
        }
    }
    for (const auto &requirement : image_state->sparse_requirements) {
        if (requirement.formatProperties.aspectMask & VK_IMAGE_ASPECT_METADATA_BIT) {
            image_state->sparse_metadata_required = true;
        }
    }
}
VKAPI_ATTR void VKAPI_CALL GetImageSparseMemoryRequirements(VkDevice device, VkImage image, uint32_t *pSparseMemoryRequirementCount,
                                                            VkSparseImageMemoryRequirements *pSparseMemoryRequirements) {
    // TODO : Implement tracking here, just passthrough initially
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetImageSparseMemoryRequirements(device, image, pSparseMemoryRequirementCount,
                                                              pSparseMemoryRequirements);
    unique_lock_t lock(global_lock);
    auto image_state = GetImageState(dev_data, image);
    // GetImageState returns null for an unknown/destroyed handle; the record helper
    // dereferences the state unconditionally, so guard here.
    if (image_state) {
        PostCallRecordGetImageSparseMemoryRequirements(image_state, *pSparseMemoryRequirementCount, pSparseMemoryRequirements);
    }
}
// Convert the KHR *2 requirement structs to the core struct and delegate to the
// common GetImageSparseMemoryRequirements() recording path.
static void PostCallRecordGetImageSparseMemoryRequirements2(IMAGE_STATE *image_state, uint32_t req_count,
                                                            VkSparseImageMemoryRequirements2KHR *reqs) {
    if (nullptr == reqs) {
        // Nothing was reported, so there is nothing to record.
        return;
    }
    std::vector<VkSparseImageMemoryRequirements> plain_reqs;
    plain_reqs.reserve(req_count);
    for (uint32_t i = 0; i < req_count; ++i) {
        assert(!reqs[i].pNext);  // TODO: If an extension is ever added here we need to handle it
        plain_reqs.push_back(reqs[i].memoryRequirements);
    }
    PostCallRecordGetImageSparseMemoryRequirements(image_state, req_count, plain_reqs.data());
}
VKAPI_ATTR void VKAPI_CALL GetImageSparseMemoryRequirements2(VkDevice device, const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
                                                             uint32_t *pSparseMemoryRequirementCount,
                                                             VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements) {
    // TODO : Implement tracking here, just passthrough initially
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetImageSparseMemoryRequirements2(device, pInfo, pSparseMemoryRequirementCount,
                                                               pSparseMemoryRequirements);
    unique_lock_t lock(global_lock);
    auto image_state = GetImageState(dev_data, pInfo->image);
    // GetImageState returns null for an unknown/destroyed handle; the record helper
    // dereferences the state unconditionally, so guard here.
    if (image_state) {
        PostCallRecordGetImageSparseMemoryRequirements2(image_state, *pSparseMemoryRequirementCount, pSparseMemoryRequirements);
    }
}
VKAPI_ATTR void VKAPI_CALL GetImageSparseMemoryRequirements2KHR(VkDevice device,
                                                                const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
                                                                uint32_t *pSparseMemoryRequirementCount,
                                                                VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements) {
    // TODO : Implement tracking here, just passthrough initially
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetImageSparseMemoryRequirements2KHR(device, pInfo, pSparseMemoryRequirementCount,
                                                                  pSparseMemoryRequirements);
    unique_lock_t lock(global_lock);
    auto image_state = GetImageState(dev_data, pInfo->image);
    // GetImageState returns null for an unknown/destroyed handle; the record helper
    // dereferences the state unconditionally, so guard here.
    if (image_state) {
        PostCallRecordGetImageSparseMemoryRequirements2(image_state, *pSparseMemoryRequirementCount, pSparseMemoryRequirements);
    }
}
VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceSparseImageFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format,
                                                                        VkImageType type, VkSampleCountFlagBits samples,
                                                                        VkImageUsageFlags usage, VkImageTiling tiling,
                                                                        uint32_t *pPropertyCount,
                                                                        VkSparseImageFormatProperties *pProperties) {
    // TODO : Implement this intercept, track sparse image format properties and make sure they are obeyed.
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    auto &table = instance_data->dispatch_table;
    table.GetPhysicalDeviceSparseImageFormatProperties(physicalDevice, format, type, samples, usage, tiling, pPropertyCount,
                                                       pProperties);
}
VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceSparseImageFormatProperties2(
    VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2KHR *pFormatInfo, uint32_t *pPropertyCount,
    VkSparseImageFormatProperties2KHR *pProperties) {
    // TODO : Implement this intercept, track sparse image format properties and make sure they are obeyed.
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    auto &table = instance_data->dispatch_table;
    table.GetPhysicalDeviceSparseImageFormatProperties2(physicalDevice, pFormatInfo, pPropertyCount, pProperties);
}
VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceSparseImageFormatProperties2KHR(
    VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2KHR *pFormatInfo, uint32_t *pPropertyCount,
    VkSparseImageFormatProperties2KHR *pProperties) {
    // TODO : Implement this intercept, track sparse image format properties and make sure they are obeyed.
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    auto &table = instance_data->dispatch_table;
    table.GetPhysicalDeviceSparseImageFormatProperties2KHR(physicalDevice, pFormatInfo, pPropertyCount, pProperties);
}
VKAPI_ATTR void VKAPI_CALL DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    IMAGE_VIEW_STATE *view_state = nullptr;
    VK_OBJECT obj;
    unique_lock_t lock(global_lock);
    const bool skip = PreCallValidateDestroyImageView(dev_data, imageView, &view_state, &obj);
    if (skip) return;
    if (VK_NULL_HANDLE != imageView) {
        // Pre-record to avoid Destroy/Create race
        PreCallRecordDestroyImageView(dev_data, imageView, view_state, obj);
    }
    lock.unlock();
    dev_data->dispatch_table.DestroyImageView(device, imageView, pAllocator);
}
// Drop the layer's tracking state for a shader module that is about to be destroyed.
// Erasing an absent key is a no-op, so an unknown handle is harmless here.
static void PreCallRecordDestroyShaderModule(layer_data *dev_data, VkShaderModule shaderModule) {
    dev_data->shaderModuleMap.erase(shaderModule);
}
VKAPI_ATTR void VKAPI_CALL DestroyShaderModule(VkDevice device, VkShaderModule shaderModule,
                                               const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    {
        // Pre-record to avoid Destroy/Create race
        unique_lock_t guard(global_lock);
        PreCallRecordDestroyShaderModule(device_data, shaderModule);
    }
    device_data->dispatch_table.DestroyShaderModule(device, shaderModule, pAllocator);
}
// Look up pipeline state (returned via out-params) and flag destruction of a pipeline
// that is still in use on a queue. Returns true when the call should be skipped.
static bool PreCallValidateDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE **pipeline_state,
                                           VK_OBJECT *obj_struct) {
    *pipeline_state = GetPipelineState(dev_data, pipeline);
    *obj_struct = {HandleToUint64(pipeline), kVulkanObjectTypePipeline};
    if (dev_data->instance_data->disabled.destroy_pipeline) return false;
    if (nullptr == *pipeline_state) return false;
    return ValidateObjectNotInUse(dev_data, *pipeline_state, *obj_struct, "vkDestroyPipeline",
                                  "VUID-vkDestroyPipeline-pipeline-00765");
}
// Invalidate command buffers that bound this pipeline and drop its tracking state.
// pipeline_state may be null for an unknown handle (matches PreCallRecordDestroySampler).
static void PreCallRecordDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE *pipeline_state,
                                         VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    if (pipeline_state) InvalidateCommandBuffers(dev_data, pipeline_state->cb_bindings, obj_struct);
    dev_data->pipelineMap.erase(pipeline);
}
VKAPI_ATTR void VKAPI_CALL DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    PIPELINE_STATE *state = nullptr;
    VK_OBJECT obj;
    unique_lock_t lock(global_lock);
    const bool skip = PreCallValidateDestroyPipeline(dev_data, pipeline, &state, &obj);
    if (skip) return;
    if (VK_NULL_HANDLE != pipeline) {
        // Pre-record to avoid Destroy/Create race
        PreCallRecordDestroyPipeline(dev_data, pipeline, state, obj);
    }
    lock.unlock();
    dev_data->dispatch_table.DestroyPipeline(device, pipeline, pAllocator);
}
// Drop the layer's tracking state for a pipeline layout that is about to be destroyed.
static void PreCallRecordDestroyPipelineLayout(layer_data *dev_data, VkPipelineLayout pipelineLayout) {
    dev_data->pipelineLayoutMap.erase(pipelineLayout);
}
VKAPI_ATTR void VKAPI_CALL DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout,
                                                 const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    {
        // Pre-record to avoid Destroy/Create race
        unique_lock_t guard(global_lock);
        PreCallRecordDestroyPipelineLayout(device_data, pipelineLayout);
    }
    device_data->dispatch_table.DestroyPipelineLayout(device, pipelineLayout, pAllocator);
}
// Look up sampler state (returned via out-params) and flag destruction of a sampler
// that is still in use. Returns true when the call should be skipped.
static bool PreCallValidateDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE **sampler_state,
                                          VK_OBJECT *obj_struct) {
    *sampler_state = GetSamplerState(dev_data, sampler);
    *obj_struct = {HandleToUint64(sampler), kVulkanObjectTypeSampler};
    if (dev_data->instance_data->disabled.destroy_sampler) return false;
    if (nullptr == *sampler_state) return false;
    return ValidateObjectNotInUse(dev_data, *sampler_state, *obj_struct, "vkDestroySampler",
                                  "VUID-vkDestroySampler-sampler-01082");
}
// Invalidate command buffers that bound this sampler and drop its tracking state.
static void PreCallRecordDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE *sampler_state,
                                        VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    if (nullptr != sampler_state) {
        InvalidateCommandBuffers(dev_data, sampler_state->cb_bindings, obj_struct);
    }
    dev_data->samplerMap.erase(sampler);
}
VKAPI_ATTR void VKAPI_CALL DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    SAMPLER_STATE *state = nullptr;
    VK_OBJECT obj;
    unique_lock_t lock(global_lock);
    const bool skip = PreCallValidateDestroySampler(dev_data, sampler, &state, &obj);
    if (skip) return;
    if (VK_NULL_HANDLE != sampler) {
        // Pre-record to avoid Destroy/Create race
        PreCallRecordDestroySampler(dev_data, sampler, state, obj);
    }
    lock.unlock();
    dev_data->dispatch_table.DestroySampler(device, sampler, pAllocator);
}
// Mark the shared layout object destroyed before dropping the map entry; descriptor
// sets may still hold references to the underlying object.
static void PreCallRecordDestroyDescriptorSetLayout(layer_data *dev_data, VkDescriptorSetLayout ds_layout) {
    auto &layout_map = dev_data->descriptorSetLayoutMap;
    auto iter = layout_map.find(ds_layout);
    if (iter == layout_map.end()) return;
    iter->second.get()->MarkDestroyed();
    layout_map.erase(iter);
}
VKAPI_ATTR void VKAPI_CALL DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout,
                                                      const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    {
        // Pre-record to avoid Destroy/Create race
        lock_guard_t guard(global_lock);
        PreCallRecordDestroyDescriptorSetLayout(device_data, descriptorSetLayout);
    }
    device_data->dispatch_table.DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
}
// Look up descriptor pool state (returned via out-params) and flag destruction of a
// pool that is still in use. Returns true when the call should be skipped.
static bool PreCallValidateDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool pool,
                                                 DESCRIPTOR_POOL_STATE **desc_pool_state, VK_OBJECT *obj_struct) {
    *desc_pool_state = GetDescriptorPoolState(dev_data, pool);
    *obj_struct = {HandleToUint64(pool), kVulkanObjectTypeDescriptorPool};
    if (dev_data->instance_data->disabled.destroy_descriptor_pool) return false;
    if (nullptr == *desc_pool_state) return false;
    return ValidateObjectNotInUse(dev_data, *desc_pool_state, *obj_struct, "vkDestroyDescriptorPool",
                                  "VUID-vkDestroyDescriptorPool-descriptorPool-00303");
}
// Tear down layer state for a descriptor pool: invalidate bound command buffers,
// free every set allocated from the pool, and release the pool state itself.
static void PreCallRecordDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool descriptorPool,
                                               DESCRIPTOR_POOL_STATE *desc_pool_state, VK_OBJECT obj_struct) {
    if (nullptr == desc_pool_state) return;
    // Any bound cmd buffers are now invalid
    InvalidateCommandBuffers(dev_data, desc_pool_state->cb_bindings, obj_struct);
    // Destroying the pool implicitly frees all sets allocated from it.
    for (auto ds : desc_pool_state->sets) {
        FreeDescriptorSet(dev_data, ds);
    }
    dev_data->descriptorPoolMap.erase(descriptorPool);
    delete desc_pool_state;
}
VKAPI_ATTR void VKAPI_CALL DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
                                                 const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    DESCRIPTOR_POOL_STATE *pool_state = nullptr;
    VK_OBJECT obj;
    unique_lock_t lock(global_lock);
    const bool skip = PreCallValidateDestroyDescriptorPool(dev_data, descriptorPool, &pool_state, &obj);
    if (skip) return;
    // Pre-record to avoid Destroy/Create race
    PreCallRecordDestroyDescriptorPool(dev_data, descriptorPool, pool_state, obj);
    lock.unlock();
    dev_data->dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator);
}
// Verify that the command buffer in the given cb_node is not in use (in_use refcount
// nonzero, i.e. submitted and not yet retired), and return the skip result.
// This function is only valid at a point when cmdBuffer is being reset or freed
// Report an error if the given command buffer is in use (in_use refcount nonzero).
//  action:     verb for the error message, e.g. "free" or "reset".
//  error_code: VUID string for the log message; taken by const reference to avoid a
//              std::string copy on every call (this runs per command buffer in pools).
// Returns true when validation failed and the API call should be skipped.
static bool CheckCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action,
                                       const std::string &error_code) {
    bool skip = false;
    if (cb_node->in_use.load()) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(cb_node->commandBuffer), error_code,
                        "Attempt to %s command buffer (0x%" PRIx64 ") which is in use.", action,
                        HandleToUint64(cb_node->commandBuffer));
    }
    return skip;
}
// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
// Iterate over all command buffers in the given pool and verify none are in use.
// error_code is taken by const reference to avoid one std::string copy per call
// (and CheckCommandBufferInFlight receives it per buffer). Returns accumulated skip.
static bool CheckCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action,
                                        const std::string &error_code) {
    bool skip = false;
    for (auto cmd_buffer : pPool->commandBuffers) {
        skip |= CheckCommandBufferInFlight(dev_data, GetCBNode(dev_data, cmd_buffer), action, error_code);
    }
    return skip;
}
// Free all command buffers in given list, removing all references/links to them using ResetCommandBufferState
// Free the layer state for each listed command buffer: reset it (dropping references
// held elsewhere), remove it from the device map and its pool, then delete the node.
static void FreeCommandBufferStates(layer_data *dev_data, COMMAND_POOL_NODE *pool_state, const uint32_t command_buffer_count,
                                    const VkCommandBuffer *command_buffers) {
    for (uint32_t idx = 0; idx < command_buffer_count; idx++) {
        GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, command_buffers[idx]);
        if (nullptr == cb_state) continue;
        // Reset prior to delete, removing various references to it.
        // TODO: fix this, it's insane.
        ResetCommandBufferState(dev_data, cb_state->commandBuffer);
        // Drop the node from both the device-wide map and the owning pool.
        dev_data->commandBufferMap.erase(cb_state->commandBuffer);
        pool_state->commandBuffers.erase(command_buffers[idx]);
        delete cb_state;
    }
}
// Validate that none of the command buffers being freed are still in use.
// Returns true when the free should be skipped.
static bool PreCallValidateFreeCommandBuffers(layer_data *dev_data, uint32_t commandBufferCount,
                                              const VkCommandBuffer *pCommandBuffers) {
    bool skip = false;
    for (uint32_t idx = 0; idx < commandBufferCount; idx++) {
        GLOBAL_CB_NODE *cb_node = GetCBNode(dev_data, pCommandBuffers[idx]);
        // Unknown handles have no state to check.
        if (nullptr != cb_node) {
            skip |= CheckCommandBufferInFlight(dev_data, cb_node, "free", "VUID-vkFreeCommandBuffers-pCommandBuffers-00047");
        }
    }
    return skip;
}
// Free layer state for the listed command buffers belonging to commandPool.
static void PreCallRecordFreeCommandBuffers(layer_data *dev_data, VkCommandPool commandPool, uint32_t commandBufferCount,
                                            const VkCommandBuffer *pCommandBuffers) {
    auto pPool = GetCommandPoolNode(dev_data, commandPool);
    // Guard against an unknown pool handle; FreeCommandBufferStates dereferences
    // the pool state unconditionally.
    if (pPool) FreeCommandBufferStates(dev_data, pPool, commandBufferCount, pCommandBuffers);
}
VKAPI_ATTR void VKAPI_CALL FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
                                              const VkCommandBuffer *pCommandBuffers) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    if (PreCallValidateFreeCommandBuffers(device_data, commandBufferCount, pCommandBuffers)) {
        return;
    }
    // Record (remove layer state) before dispatching to avoid a Free/Allocate race.
    PreCallRecordFreeCommandBuffers(device_data, commandPool, commandBufferCount, pCommandBuffers);
    lock.unlock();
    device_data->dispatch_table.FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
}
// Create layer tracking state for a newly created command pool.
static void PostCallRecordCreateCommandPool(layer_data *dev_data, const VkCommandPoolCreateInfo *pCreateInfo,
                                            VkCommandPool *pCommandPool) {
    // Single map lookup (operator[] default-constructs the node on first access)
    // instead of hashing the handle twice.
    COMMAND_POOL_NODE &cp_node = dev_data->commandPoolMap[*pCommandPool];
    cp_node.createFlags = pCreateInfo->flags;
    cp_node.queueFamilyIndex = pCreateInfo->queueFamilyIndex;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
                                                 const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    const VkResult result = device_data->dispatch_table.CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
    // Only track pools the driver actually created.
    if (result == VK_SUCCESS) {
        lock_guard_t guard(global_lock);
        PostCallRecordCreateCommandPool(device_data, pCreateInfo, pCommandPool);
    }
    return result;
}
// Pipeline-statistics query pools require the pipelineStatisticsQuery device feature.
// Returns true when the create call should be skipped.
static bool PreCallValidateCreateQueryPool(layer_data *dev_data, const VkQueryPoolCreateInfo *pCreateInfo) {
    if (!pCreateInfo) return false;
    if (pCreateInfo->queryType != VK_QUERY_TYPE_PIPELINE_STATISTICS) return false;
    if (dev_data->enabled_features.core.pipelineStatisticsQuery) return false;
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
                   "VUID-VkQueryPoolCreateInfo-queryType-00791",
                   "Query pool with type VK_QUERY_TYPE_PIPELINE_STATISTICS created on a device with "
                   "VkDeviceCreateInfo.pEnabledFeatures.pipelineStatisticsQuery == VK_FALSE.");
}
// Create layer tracking state for a newly created query pool, retaining its create info.
static void PostCallRecordCreateQueryPool(layer_data *dev_data, const VkQueryPoolCreateInfo *pCreateInfo, VkQueryPool *pQueryPool) {
    QUERY_POOL_NODE *qp_node = &dev_data->queryPoolMap[*pQueryPool];
    qp_node->createInfo = *pCreateInfo;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    const bool skip = PreCallValidateCreateQueryPool(device_data, pCreateInfo);
    lock.unlock();
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    const VkResult result = device_data->dispatch_table.CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
    // Only track pools the driver actually created.
    if (result == VK_SUCCESS) {
        lock.lock();
        PostCallRecordCreateQueryPool(device_data, pCreateInfo, pQueryPool);
    }
    return result;
}
// Destroying a pool implicitly frees its command buffers, so all of them must be
// complete (not in-flight). Returns true when the destroy should be skipped.
static bool PreCallValidateDestroyCommandPool(layer_data *dev_data, VkCommandPool pool) {
    COMMAND_POOL_NODE *cp_state = GetCommandPoolNode(dev_data, pool);
    if (dev_data->instance_data->disabled.destroy_command_pool) return false;
    if (nullptr == cp_state) return false;
    return CheckCommandBuffersInFlight(dev_data, cp_state, "destroy command pool with",
                                       "VUID-vkDestroyCommandPool-commandPool-00041");
}
// Remove the pool from commandPoolMap after freeing the layer state of its buffers.
// "When a pool is destroyed, all command buffers allocated from the pool are freed."
static void PreCallRecordDestroyCommandPool(layer_data *dev_data, VkCommandPool pool) {
    COMMAND_POOL_NODE *cp_state = GetCommandPoolNode(dev_data, pool);
    if (nullptr == cp_state) return;
    // Snapshot the handles first: FreeCommandBufferStates erases entries from
    // cp_state->commandBuffers while it runs, so we must not iterate the live set.
    std::vector<VkCommandBuffer> buffers(cp_state->commandBuffers.begin(), cp_state->commandBuffers.end());
    FreeCommandBufferStates(dev_data, cp_state, static_cast<uint32_t>(buffers.size()), buffers.data());
    dev_data->commandPoolMap.erase(pool);
}
// Destroy commandPool along with all of the commandBuffers allocated from that pool
VKAPI_ATTR void VKAPI_CALL DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    if (PreCallValidateDestroyCommandPool(device_data, commandPool)) {
        return;
    }
    // Pre-record to avoid Destroy/Create race
    PreCallRecordDestroyCommandPool(device_data, commandPool);
    lock.unlock();
    device_data->dispatch_table.DestroyCommandPool(device, commandPool, pAllocator);
}
// A pool reset implicitly resets all its command buffers, so none may be in-flight.
// Returns true when the reset should be skipped.
static bool PreCallValidateResetCommandPool(layer_data *dev_data, COMMAND_POOL_NODE *pPool) {
    return CheckCommandBuffersInFlight(dev_data, pPool, "reset command pool with", "VUID-vkResetCommandPool-commandPool-00040");
}
// Reset the layer state of every command buffer allocated from the pool, mirroring
// the driver-side effect of vkResetCommandPool.
static void PostCallRecordResetCommandPool(layer_data *dev_data, COMMAND_POOL_NODE *pPool) {
    for (auto cb_handle : pPool->commandBuffers) {
        ResetCommandBufferState(dev_data, cb_handle);
    }
}
VKAPI_ATTR VkResult VKAPI_CALL ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    auto pool_node = GetCommandPoolNode(device_data, commandPool);
    const bool skip = PreCallValidateResetCommandPool(device_data, pool_node);
    lock.unlock();
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    const VkResult result = device_data->dispatch_table.ResetCommandPool(device, commandPool, flags);
    if (VK_SUCCESS == result) {
        // Reset all of the CBs allocated from this pool
        lock.lock();
        PostCallRecordResetCommandPool(device_data, pool_node);
        lock.unlock();
    }
    return result;
}
// Flag any fence that is still in-flight (submitted and not yet signaled); resetting
// such a fence is invalid. Returns true when the reset should be skipped.
static bool PreCallValidateResetFences(layer_data *dev_data, uint32_t fenceCount, const VkFence *pFences) {
    bool skip = false;
    for (uint32_t idx = 0; idx < fenceCount; ++idx) {
        FENCE_NODE *fence_node = GetFenceNode(dev_data, pFences[idx]);
        if (!fence_node) continue;
        // Only internally-scoped fences have state the layer can reason about.
        if (fence_node->scope == kSyncScopeInternal && fence_node->state == FENCE_INFLIGHT) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                            HandleToUint64(pFences[idx]), "VUID-vkResetFences-pFences-01123", "Fence 0x%" PRIx64 " is in use.",
                            HandleToUint64(pFences[idx]));
        }
    }
    return skip;
}
// Record the effect of a successful vkResetFences: internal fences become unsignaled,
// and temporarily-external fences revert to internal scope.
static void PostCallRecordResetFences(layer_data *dev_data, uint32_t fenceCount, const VkFence *pFences) {
    for (uint32_t idx = 0; idx < fenceCount; ++idx) {
        FENCE_NODE *fence_node = GetFenceNode(dev_data, pFences[idx]);
        if (!fence_node) continue;
        if (fence_node->scope == kSyncScopeInternal) {
            fence_node->state = FENCE_UNSIGNALED;
        } else if (fence_node->scope == kSyncScopeExternalTemporary) {
            fence_node->scope = kSyncScopeInternal;
        }
    }
}
VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    const bool skip = PreCallValidateResetFences(device_data, fenceCount, pFences);
    lock.unlock();
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    const VkResult result = device_data->dispatch_table.ResetFences(device, fenceCount, pFences);
    // Only record state changes when the driver accepted the reset.
    if (result == VK_SUCCESS) {
        lock.lock();
        PostCallRecordResetFences(device_data, fenceCount, pFences);
        lock.unlock();
    }
    return result;
}
// For given cb_nodes, invalidate them and track object causing invalidation
void InvalidateCommandBuffers(const layer_data *dev_data, std::unordered_set<GLOBAL_CB_NODE *> const &cb_nodes, VK_OBJECT obj) {
    for (auto cb_node : cb_nodes) {
        if (cb_node->state == CB_RECORDING) {
            // Invalidating a buffer that is still being recorded is suspicious enough
            // to warn about, but not an error in itself.
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    HandleToUint64(cb_node->commandBuffer), kVUID_Core_DrawState_InvalidCommandBuffer,
                    "Invalidating a command buffer that's currently being recorded: 0x%" PRIx64 ".",
                    HandleToUint64(cb_node->commandBuffer));
            cb_node->state = CB_INVALID_INCOMPLETE;
        } else if (cb_node->state == CB_RECORDED) {
            cb_node->state = CB_INVALID_COMPLETE;
        }
        // Remember which object caused the invalidation for later error reporting.
        cb_node->broken_bindings.push_back(obj);
        // if secondary, then propagate the invalidation to the primaries that will call us.
        if (cb_node->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
            InvalidateCommandBuffers(dev_data, cb_node->linkedCommandBuffers, obj);
        }
    }
}
// Look up framebuffer state (returned via out-params) and flag destruction of a
// framebuffer that is still in use. Returns true when the call should be skipped.
static bool PreCallValidateDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer,
                                              FRAMEBUFFER_STATE **framebuffer_state, VK_OBJECT *obj_struct) {
    *framebuffer_state = GetFramebufferState(dev_data, framebuffer);
    *obj_struct = {HandleToUint64(framebuffer), kVulkanObjectTypeFramebuffer};
    if (dev_data->instance_data->disabled.destroy_framebuffer) return false;
    if (nullptr == *framebuffer_state) return false;
    return ValidateObjectNotInUse(dev_data, *framebuffer_state, *obj_struct, "vkDestroyFramebuffer",
                                  "VUID-vkDestroyFramebuffer-framebuffer-00892");
}
// Invalidate command buffers that bound this framebuffer and drop its tracking state.
// framebuffer_state may be null for an unknown handle (matches PreCallRecordDestroySampler).
static void PreCallRecordDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer, FRAMEBUFFER_STATE *framebuffer_state,
                                            VK_OBJECT obj_struct) {
    if (framebuffer_state) InvalidateCommandBuffers(dev_data, framebuffer_state->cb_bindings, obj_struct);
    dev_data->frameBufferMap.erase(framebuffer);
}
VKAPI_ATTR void VKAPI_CALL DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    FRAMEBUFFER_STATE *fb_state = nullptr;
    VK_OBJECT obj;
    unique_lock_t lock(global_lock);
    const bool skip = PreCallValidateDestroyFramebuffer(dev_data, framebuffer, &fb_state, &obj);
    if (skip) return;
    if (VK_NULL_HANDLE != framebuffer) {
        // Pre-record to avoid Destroy/Create race
        PreCallRecordDestroyFramebuffer(dev_data, framebuffer, fb_state, obj);
    }
    lock.unlock();
    dev_data->dispatch_table.DestroyFramebuffer(device, framebuffer, pAllocator);
}
// Look up render pass state (returned via out-params) and flag destruction of a
// render pass that is still in use. Returns true when the call should be skipped.
static bool PreCallValidateDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE **rp_state,
                                             VK_OBJECT *obj_struct) {
    *rp_state = GetRenderPassState(dev_data, render_pass);
    *obj_struct = {HandleToUint64(render_pass), kVulkanObjectTypeRenderPass};
    if (dev_data->instance_data->disabled.destroy_renderpass) return false;
    if (nullptr == *rp_state) return false;
    return ValidateObjectNotInUse(dev_data, *rp_state, *obj_struct, "vkDestroyRenderPass",
                                  "VUID-vkDestroyRenderPass-renderPass-00873");
}
// Invalidate command buffers that bound this render pass and drop its tracking state.
// rp_state may be null for an unknown handle (matches PreCallRecordDestroySampler).
static void PreCallRecordDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE *rp_state,
                                           VK_OBJECT obj_struct) {
    if (rp_state) InvalidateCommandBuffers(dev_data, rp_state->cb_bindings, obj_struct);
    dev_data->renderPassMap.erase(render_pass);
}
VKAPI_ATTR void VKAPI_CALL DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    RENDER_PASS_STATE *rp_state = nullptr;
    VK_OBJECT obj;
    unique_lock_t lock(global_lock);
    const bool skip = PreCallValidateDestroyRenderPass(dev_data, renderPass, &rp_state, &obj);
    if (skip) return;
    if (VK_NULL_HANDLE != renderPass) {
        // Pre-record to avoid Destroy/Create race
        PreCallRecordDestroyRenderPass(dev_data, renderPass, rp_state, obj);
    }
    lock.unlock();
    dev_data->dispatch_table.DestroyRenderPass(device, renderPass, pAllocator);
}
VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    const bool skip = PreCallValidateCreateBuffer(device_data, pCreateInfo);
    lock.unlock();
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    const VkResult result = device_data->dispatch_table.CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
    // Only track buffers the driver actually created.
    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordCreateBuffer(device_data, pCreateInfo, pBuffer);
        lock.unlock();
    }
    return result;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    const bool skip = PreCallValidateCreateBufferView(device_data, pCreateInfo);
    lock.unlock();
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    const VkResult result = device_data->dispatch_table.CreateBufferView(device, pCreateInfo, pAllocator, pView);
    // Only track views the driver actually created.
    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordCreateBufferView(device_data, pCreateInfo, pView);
        lock.unlock();
    }
    return result;
}
// Access helper functions for external modules
// Query format properties for the device's physical device via the instance dispatch
// table; exposed to external validation modules.
VkFormatProperties GetFormatProperties(const core_validation::layer_data *device_data, const VkFormat format) {
    instance_layer_data *instance_data =
        GetLayerDataPtr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
    VkFormatProperties props;
    instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(device_data->physical_device, format, &props);
    return props;
}
// Query image format properties for the parameters of a prospective image create;
// exposed to external validation modules. Returns the driver's VkResult.
VkResult GetImageFormatProperties(core_validation::layer_data *device_data, const VkImageCreateInfo *image_ci,
                                  VkImageFormatProperties *pImageFormatProperties) {
    instance_layer_data *instance_data =
        GetLayerDataPtr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
    auto &table = instance_data->dispatch_table;
    return table.GetPhysicalDeviceImageFormatProperties(device_data->physical_device, image_ci->format, image_ci->imageType,
                                                        image_ci->tiling, image_ci->usage, image_ci->flags,
                                                        pImageFormatProperties);
}
// Simple accessors giving external validation modules read access to layer_data members.
const debug_report_data *GetReportData(const core_validation::layer_data *device_data) { return device_data->report_data; }
const VkPhysicalDeviceProperties *GetPhysicalDeviceProperties(const core_validation::layer_data *device_data) {
    return &device_data->phys_dev_props;
}
const CHECK_DISABLED *GetDisables(core_validation::layer_data *device_data) { return &device_data->instance_data->disabled; }
std::unordered_map<VkImage, std::unique_ptr<IMAGE_STATE>> *GetImageMap(core_validation::layer_data *device_data) {
    return &device_data->imageMap;
}
std::unordered_map<VkImage, std::vector<ImageSubresourcePair>> *GetImageSubresourceMap(core_validation::layer_data *device_data) {
    return &device_data->imageSubresourceMap;
}
// Mutable and const overloads of the image-layout map accessor.
std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> *GetImageLayoutMap(layer_data *device_data) {
    return &device_data->imageLayoutMap;
}
std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> const *GetImageLayoutMap(layer_data const *device_data) {
    return &device_data->imageLayoutMap;
}
std::unordered_map<VkBuffer, std::unique_ptr<BUFFER_STATE>> *GetBufferMap(layer_data *device_data) {
    return &device_data->bufferMap;
}
std::unordered_map<VkBufferView, std::unique_ptr<BUFFER_VIEW_STATE>> *GetBufferViewMap(layer_data *device_data) {
    return &device_data->bufferViewMap;
}
std::unordered_map<VkImageView, std::unique_ptr<IMAGE_VIEW_STATE>> *GetImageViewMap(layer_data *device_data) {
    return &device_data->imageViewMap;
}
const PHYS_DEV_PROPERTIES_NODE *GetPhysDevProperties(const layer_data *device_data) { return &device_data->phys_dev_properties; }
const DeviceFeatures *GetEnabledFeatures(const layer_data *device_data) { return &device_data->enabled_features; }
const DeviceExtensions *GetDeviceExtensions(const layer_data *device_data) { return &device_data->extensions; }
uint32_t GetApiVersion(const layer_data *device_data) { return device_data->api_version; }
// Layer intercept for vkCreateImage: validate the create-info, forward to the
// driver, and on success record tracking state for the new image.
VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
                                           const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // Bail out without calling down the chain if validation failed.
    if (PreCallValidateCreateImage(dev_data, pCreateInfo, pAllocator, pImage)) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    const VkResult result = dev_data->dispatch_table.CreateImage(device, pCreateInfo, pAllocator, pImage);
    if (result == VK_SUCCESS) {
        lock_guard_t lock(global_lock);
        PostCallRecordCreateImage(dev_data, pCreateInfo, pImage);
    }
    return result;
}
// Layer intercept for vkCreateImageView: validate under the global lock, call
// the driver unlocked, then record the new view's state on success.
VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    {
        // Validation reads shared state, so hold the lock only for this step.
        lock_guard_t lock(global_lock);
        if (PreCallValidateCreateImageView(dev_data, pCreateInfo)) {
            return VK_ERROR_VALIDATION_FAILED_EXT;
        }
    }
    const VkResult result = dev_data->dispatch_table.CreateImageView(device, pCreateInfo, pAllocator, pView);
    if (result == VK_SUCCESS) {
        lock_guard_t lock(global_lock);
        PostCallRecordCreateImageView(dev_data, pCreateInfo, *pView);
    }
    return result;
}
// Record tracking state for a newly created fence. operator[] inserts the map
// entry if absent; a fence created pre-signaled starts in the RETIRED state.
static void PostCallRecordCreateFence(layer_data *dev_data, const VkFenceCreateInfo *pCreateInfo, VkFence *pFence) {
    auto &node = dev_data->fenceMap[*pFence];
    node.fence = *pFence;
    node.createInfo = *pCreateInfo;
    if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
        node.state = FENCE_RETIRED;
    } else {
        node.state = FENCE_UNSIGNALED;
    }
}
// Layer intercept for vkCreateFence: no pre-validation is needed; just forward
// to the driver and, on success, begin tracking the new fence.
VKAPI_ATTR VkResult VKAPI_CALL CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo,
                                           const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    const VkResult result = dev_data->dispatch_table.CreateFence(device, pCreateInfo, pAllocator, pFence);
    if (result == VK_SUCCESS) {
        lock_guard_t lock(global_lock);
        PostCallRecordCreateFence(dev_data, pCreateInfo, pFence);
    }
    return result;
}
// TODO handle pipeline caches
// Pure pass-through for now: no validation or state tracking is done for
// pipeline caches.
VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
                                                   const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    return dev_data->dispatch_table.CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
}
// Pass-through for vkDestroyPipelineCache; the layer keeps no cache state yet.
VKAPI_ATTR void VKAPI_CALL DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache,
                                                const VkAllocationCallbacks *pAllocator) {
    // Pre-record to avoid Destroy/Create race (if/when implemented)
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.DestroyPipelineCache(device, pipelineCache, pAllocator);
}
// Pass-through for vkGetPipelineCacheData.
VKAPI_ATTR VkResult VKAPI_CALL GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize,
                                                    void *pData) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    return dev_data->dispatch_table.GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
}
// Pass-through for vkMergePipelineCaches.
VKAPI_ATTR VkResult VKAPI_CALL MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount,
                                                   const VkPipelineCache *pSrcCaches) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    return dev_data->dispatch_table.MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
}
// Validation cache:
// CV is the bottommost implementor of this extension. Don't pass calls down.
VKAPI_ATTR VkResult VKAPI_CALL CreateValidationCacheEXT(VkDevice device, const VkValidationCacheCreateInfoEXT *pCreateInfo,
                                                        const VkAllocationCallbacks *pAllocator,
                                                        VkValidationCacheEXT *pValidationCache) {
    *pValidationCache = ValidationCache::Create(pCreateInfo);
    if (*pValidationCache) {
        return VK_SUCCESS;
    }
    return VK_ERROR_INITIALIZATION_FAILED;
}
// The handle is a pointer to a layer-owned ValidationCache object; reclaim it.
VKAPI_ATTR void VKAPI_CALL DestroyValidationCacheEXT(VkDevice device, VkValidationCacheEXT validationCache,
                                                     const VkAllocationCallbacks *pAllocator) {
    auto *cache = (ValidationCache *)validationCache;
    delete cache;
}
// Serialize the validation cache into pData. Per the extension contract,
// Write() shrinks *pDataSize when the buffer was too small, which we report
// as VK_INCOMPLETE (only meaningful when pData was actually supplied).
VKAPI_ATTR VkResult VKAPI_CALL GetValidationCacheDataEXT(VkDevice device, VkValidationCacheEXT validationCache, size_t *pDataSize,
                                                         void *pData) {
    const size_t requested = *pDataSize;
    ((ValidationCache *)validationCache)->Write(pDataSize, pData);
    if (pData && *pDataSize != requested) {
        return VK_INCOMPLETE;
    }
    return VK_SUCCESS;
}
// Merge one or more source validation caches into dstCache.
// dstCache itself appearing in pSrcCaches violates
// VUID-vkMergeValidationCachesEXT-dstCache-01536 and fails the call.
VKAPI_ATTR VkResult VKAPI_CALL MergeValidationCachesEXT(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount,
                                                        const VkValidationCacheEXT *pSrcCaches) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    auto dst = (ValidationCache *)dstCache;
    auto src = (ValidationCache const *const *)pSrcCaches;
    VkResult result = VK_SUCCESS;
    for (uint32_t i = 0; i < srcCacheCount; i++) {
        if (src[i] == dst) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT,
                            0, "VUID-vkMergeValidationCachesEXT-dstCache-01536",
                            "vkMergeValidationCachesEXT: dstCache (0x%" PRIx64 ") must not appear in pSrcCaches array.",
                            HandleToUint64(dstCache));
            result = VK_ERROR_VALIDATION_FAILED_EXT;
        }
        // NOTE: once 'skip' is set by a self-merge it stays set, so all remaining
        // source caches are left unmerged (sources before the offender were
        // already merged).
        if (!skip) {
            dst->Merge(src[i]);
        }
    }
    return result;
}
// utility function to set collective state for pipeline
// If any enabled color-blend attachment uses a CONSTANT_* blend factor, mark
// the pipeline as depending on blend constants.
void SetPipelineState(PIPELINE_STATE *pPipe) {
    if (!pPipe->graphicsPipelineCI.pColorBlendState) return;
    // True when the factor falls inside the CONSTANT_COLOR..ONE_MINUS_CONSTANT_ALPHA enum range.
    const auto uses_blend_constant = [](VkBlendFactor factor) {
        return (factor >= VK_BLEND_FACTOR_CONSTANT_COLOR) && (factor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA);
    };
    for (size_t idx = 0; idx < pPipe->attachments.size(); ++idx) {
        const auto &attachment = pPipe->attachments[idx];
        if (VK_TRUE != attachment.blendEnable) continue;
        if (uses_blend_constant(attachment.dstAlphaBlendFactor) || uses_blend_constant(attachment.dstColorBlendFactor) ||
            uses_blend_constant(attachment.srcAlphaBlendFactor) || uses_blend_constant(attachment.srcColorBlendFactor)) {
            pPipe->blendConstantsEnabled = true;
        }
    }
}
// Shadow each graphics-pipeline create-info into a PIPELINE_STATE tracking
// struct, then run the two validation passes over the populated vector.
// Returns true when any pipeline fails validation.
static bool PreCallValidateCreateGraphicsPipelines(layer_data *dev_data, vector<std::unique_ptr<PIPELINE_STATE>> *pipe_state,
                                                   const uint32_t count, const VkGraphicsPipelineCreateInfo *pCreateInfos) {
    bool skip = false;
    pipe_state->reserve(count);
    // TODO - State changes and validation need to be untangled here
    for (uint32_t idx = 0; idx < count; idx++) {
        auto state = std::unique_ptr<PIPELINE_STATE>(new PIPELINE_STATE);
        state->initGraphicsPipeline(&pCreateInfos[idx], GetRenderPassStateSharedPtr(dev_data, pCreateInfos[idx].renderPass));
        state->pipeline_layout = *GetPipelineLayout(dev_data, pCreateInfos[idx].layout);
        pipe_state->push_back(std::move(state));
    }
    // Validation is split into "Locked" and "Unlocked" passes; both see the
    // whole vector so they run only after all states are shadowed.
    for (uint32_t idx = 0; idx < count; idx++) {
        skip |= ValidatePipelineLocked(dev_data, *pipe_state, idx);
    }
    for (uint32_t idx = 0; idx < count; idx++) {
        skip |= ValidatePipelineUnlocked(dev_data, *pipe_state, idx);
    }
    return skip;
}
// Move ownership of each successfully created pipeline's tracking state into
// the device-level pipelineMap; failed slots (VK_NULL_HANDLE) are dropped.
static void PostCallRecordCreateGraphicsPipelines(layer_data *dev_data, vector<std::unique_ptr<PIPELINE_STATE>> *pipe_state,
                                                  const uint32_t count, VkPipeline *pPipelines) {
    for (uint32_t idx = 0; idx < count; idx++) {
        const VkPipeline handle = pPipelines[idx];
        if (handle == VK_NULL_HANDLE) continue;
        (*pipe_state)[idx]->pipeline = handle;
        dev_data->pipelineMap[handle] = std::move((*pipe_state)[idx]);
    }
}
// Layer intercept for vkCreateGraphicsPipelines. Validation failure rejects the
// whole batch (all output handles set to VK_NULL_HANDLE) without calling the
// driver. Note the global lock is released around the driver call and
// re-acquired for post-call state recording.
VKAPI_ATTR VkResult VKAPI_CALL CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                                                       const VkGraphicsPipelineCreateInfo *pCreateInfos,
                                                       const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
    // The order of operations here is a little convoluted but gets the job done
    //  1. Pipeline create state is first shadowed into PIPELINE_STATE struct
    //  2. Create state is then validated (which uses flags setup during shadowing)
    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
    vector<std::unique_ptr<PIPELINE_STATE>> pipe_state;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateCreateGraphicsPipelines(dev_data, &pipe_state, count, pCreateInfos);
    if (skip) {
        for (uint32_t i = 0; i < count; i++) {
            pPipelines[i] = VK_NULL_HANDLE;
        }
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    lock.unlock();
    auto result =
        dev_data->dispatch_table.CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
    lock.lock();
    PostCallRecordCreateGraphicsPipelines(dev_data, &pipe_state, count, pPipelines);
    return result;
}
// Shadow each compute-pipeline create-info into a PIPELINE_STATE tracking
// struct and validate it immediately. Returns true when any pipeline fails.
static bool PreCallValidateCreateComputePipelines(layer_data *dev_data, vector<std::unique_ptr<PIPELINE_STATE>> *pipe_state,
                                                  const uint32_t count, const VkComputePipelineCreateInfo *pCreateInfos) {
    bool skip = false;
    pipe_state->reserve(count);
    for (uint32_t idx = 0; idx < count; idx++) {
        // Create and initialize internal tracking data structure
        pipe_state->push_back(unique_ptr<PIPELINE_STATE>(new PIPELINE_STATE));
        auto &state = pipe_state->back();
        state->initComputePipeline(&pCreateInfos[idx]);
        state->pipeline_layout = *GetPipelineLayout(dev_data, pCreateInfos[idx].layout);
        // TODO: Add Compute Pipeline Verification
        skip |= ValidateComputePipeline(dev_data, state.get());
    }
    return skip;
}
// Move ownership of each successfully created compute pipeline's tracking
// state into the device-level pipelineMap; failed slots are dropped.
static void PostCallRecordCreateComputePipelines(layer_data *dev_data, vector<std::unique_ptr<PIPELINE_STATE>> *pipe_state,
                                                 const uint32_t count, VkPipeline *pPipelines) {
    for (uint32_t idx = 0; idx < count; idx++) {
        const VkPipeline handle = pPipelines[idx];
        if (handle == VK_NULL_HANDLE) continue;
        (*pipe_state)[idx]->pipeline = handle;
        dev_data->pipelineMap[handle] = std::move((*pipe_state)[idx]);
    }
}
// Layer intercept for vkCreateComputePipelines. Mirrors CreateGraphicsPipelines:
// validate the whole batch under the lock, reject all handles on failure, and
// release the lock around the driver call.
VKAPI_ATTR VkResult VKAPI_CALL CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                                                      const VkComputePipelineCreateInfo *pCreateInfos,
                                                      const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
    vector<std::unique_ptr<PIPELINE_STATE>> pipe_state;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateCreateComputePipelines(dev_data, &pipe_state, count, pCreateInfos);
    if (skip) {
        for (uint32_t i = 0; i < count; i++) {
            pPipelines[i] = VK_NULL_HANDLE;
        }
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    lock.unlock();
    auto result =
        dev_data->dispatch_table.CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
    lock.lock();
    PostCallRecordCreateComputePipelines(dev_data, &pipe_state, count, pPipelines);
    return result;
}
// Shadow each raytracing-pipeline create-info into a PIPELINE_STATE tracking
// struct, then validate each one. Returns true when any pipeline fails.
static bool PreCallValidateCreateRaytracingPipelinesNVX(layer_data *dev_data, uint32_t count,
                                                        const VkRaytracingPipelineCreateInfoNVX *pCreateInfos,
                                                        vector<std::unique_ptr<PIPELINE_STATE>> &pipe_state) {
    bool skip = false;
    // The order of operations here is a little convoluted but gets the job done
    //  1. Pipeline create state is first shadowed into PIPELINE_STATE struct
    //  2. Create state is then validated (which uses flags setup during shadowing)
    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
    for (uint32_t idx = 0; idx < count; idx++) {
        pipe_state.push_back(std::unique_ptr<PIPELINE_STATE>(new PIPELINE_STATE));
        auto &state = pipe_state.back();
        state->initRaytracingPipelineNVX(&pCreateInfos[idx]);
        state->pipeline_layout = *GetPipelineLayout(dev_data, pCreateInfos[idx].layout);
    }
    for (uint32_t idx = 0; idx < count; idx++) {
        skip |= ValidateRaytracingPipelineNVX(dev_data, pipe_state[idx].get());
    }
    return skip;
}
// Move ownership of each successfully created raytracing pipeline's tracking
// state into the device-level pipelineMap; failed slots are dropped.
static void PostCallRecordCreateRaytracingPipelinesNVX(layer_data *dev_data, uint32_t count,
                                                       vector<std::unique_ptr<PIPELINE_STATE>> &pipe_state,
                                                       VkPipeline *pPipelines) {
    for (uint32_t idx = 0; idx < count; idx++) {
        const VkPipeline handle = pPipelines[idx];
        if (handle == VK_NULL_HANDLE) continue;
        pipe_state[idx]->pipeline = handle;
        dev_data->pipelineMap[handle] = std::move(pipe_state[idx]);
    }
}
// Layer intercept for vkCreateRaytracingPipelinesNVX. Same batch semantics as
// the graphics/compute variants: any validation failure rejects all handles.
// Note the lock is dropped before the skip check here (unlike the other two
// variants) — equivalent, since only local state is touched afterward.
VKAPI_ATTR VkResult VKAPI_CALL CreateRaytracingPipelinesNVX(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                                                            const VkRaytracingPipelineCreateInfoNVX *pCreateInfos,
                                                            const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
    bool skip = false;
    vector<std::unique_ptr<PIPELINE_STATE>> pipe_state;
    pipe_state.reserve(count);
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    skip |= PreCallValidateCreateRaytracingPipelinesNVX(dev_data, count, pCreateInfos, pipe_state);
    lock.unlock();
    if (skip) {
        for (uint32_t i = 0; i < count; i++) {
            pPipelines[i] = VK_NULL_HANDLE;
        }
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    auto result =
        dev_data->dispatch_table.CreateRaytracingPipelinesNVX(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
    lock.lock();
    PostCallRecordCreateRaytracingPipelinesNVX(dev_data, count, pipe_state, pPipelines);
    return result;
}
// Begin tracking a newly created sampler.
static void PostCallRecordCreateSampler(layer_data *dev_data, const VkSamplerCreateInfo *pCreateInfo, VkSampler *pSampler) {
    dev_data->samplerMap[*pSampler].reset(new SAMPLER_STATE(pSampler, pCreateInfo));
}
// Layer intercept for vkCreateSampler: forward to the driver and, on success,
// record tracking state for the new sampler.
VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
                                             const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    const VkResult result = dev_data->dispatch_table.CreateSampler(device, pCreateInfo, pAllocator, pSampler);
    if (result == VK_SUCCESS) {
        lock_guard_t lock(global_lock);
        PostCallRecordCreateSampler(dev_data, pCreateInfo, pSampler);
    }
    return result;
}
// Validate a descriptor-set-layout create-info against push-descriptor and
// descriptor-indexing limits/features. Returns true to skip the API call.
// Short-circuits when the check is disabled at the instance level.
static bool PreCallValidateCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info) {
    if (dev_data->instance_data->disabled.create_descriptor_set_layout) return false;
    return cvdescriptorset::DescriptorSetLayout::ValidateCreateInfo(
        dev_data->report_data, create_info, dev_data->extensions.vk_khr_push_descriptor,
        dev_data->phys_dev_ext_props.max_push_descriptors, dev_data->extensions.vk_ext_descriptor_indexing,
        &dev_data->enabled_features.descriptor_indexing,
        &dev_data->enabled_features.inline_uniform_block,
        &dev_data->phys_dev_ext_props.inline_uniform_block_props);
}
// Begin tracking a newly created descriptor set layout. Layouts are held via
// shared_ptr, so other tracking objects may share ownership of this state.
static void PostCallRecordCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info,
                                                    VkDescriptorSetLayout set_layout) {
    auto layout_state = std::make_shared<cvdescriptorset::DescriptorSetLayout>(create_info, set_layout);
    dev_data->descriptorSetLayoutMap[set_layout] = std::move(layout_state);
}
// Layer intercept for vkCreateDescriptorSetLayout: validate under the global
// lock, call the driver unlocked, then record the layout state on success.
VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
                                                         const VkAllocationCallbacks *pAllocator,
                                                         VkDescriptorSetLayout *pSetLayout) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    if (PreCallValidateCreateDescriptorSetLayout(dev_data, pCreateInfo)) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    lock.unlock();
    const VkResult result = dev_data->dispatch_table.CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
    if (result == VK_SUCCESS) {
        lock.lock();
        PostCallRecordCreateDescriptorSetLayout(dev_data, pCreateInfo, *pSetLayout);
    }
    return result;
}
// Used by CreatePipelineLayout and CmdPushConstants.
// Note that the index argument is optional and only used by CreatePipelineLayout.
// Checks three independent constraints on a push-constant range and reports a
// caller-specific VUID for each violation:
//   1. offset + size must fit within maxPushConstantsSize (overflow-safe form);
//   2. size must be non-zero and a multiple of 4;
//   3. offset must be a multiple of 4.
// Returns true when any violation was reported (i.e. the call should be skipped).
static bool ValidatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
                                      const char *caller_name, uint32_t index = 0) {
    if (dev_data->instance_data->disabled.push_constant_range) return false;
    uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize;
    bool skip = false;
    // Check that offset + size don't exceed the max.
    // Prevent arithetic overflow here by avoiding addition and testing in this order.
    if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
        // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            if (offset >= maxPushConstantsSize) {
                skip |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                    "VUID-VkPushConstantRange-offset-00294",
                    "%s call has push constants index %u with offset %u that exceeds this device's maxPushConstantSize of %u.",
                    caller_name, index, offset, maxPushConstantsSize);
            }
            if (size > maxPushConstantsSize - offset) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                "VUID-VkPushConstantRange-size-00298",
                                "%s call has push constants index %u with offset %u and size %u that exceeds this device's "
                                "maxPushConstantSize of %u.",
                                caller_name, index, offset, size, maxPushConstantsSize);
            }
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            if (offset >= maxPushConstantsSize) {
                skip |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                    "VUID-vkCmdPushConstants-offset-00370",
                    "%s call has push constants index %u with offset %u that exceeds this device's maxPushConstantSize of %u.",
                    caller_name, index, offset, maxPushConstantsSize);
            }
            if (size > maxPushConstantsSize - offset) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                "VUID-vkCmdPushConstants-size-00371",
                                "%s call has push constants index %u with offset %u and size %u that exceeds this device's "
                                "maxPushConstantSize of %u.",
                                caller_name, index, offset, size, maxPushConstantsSize);
            }
        } else {
            // Internal error: only the two callers above are supported.
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            kVUID_Core_DrawState_InternalError, "%s caller not supported.", caller_name);
        }
    }
    // size needs to be non-zero and a multiple of 4.
    if ((size == 0) || ((size & 0x3) != 0)) {
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            if (size == 0) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                "VUID-VkPushConstantRange-size-00296",
                                "%s call has push constants index %u with size %u. Size must be greater than zero.", caller_name,
                                index, size);
            }
            if (size & 0x3) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                "VUID-VkPushConstantRange-size-00297",
                                "%s call has push constants index %u with size %u. Size must be a multiple of 4.", caller_name,
                                index, size);
            }
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            if (size == 0) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                "VUID-vkCmdPushConstants-size-arraylength",
                                "%s call has push constants index %u with size %u. Size must be greater than zero.", caller_name,
                                index, size);
            }
            if (size & 0x3) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                "VUID-vkCmdPushConstants-size-00369",
                                "%s call has push constants index %u with size %u. Size must be a multiple of 4.", caller_name,
                                index, size);
            }
        } else {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            kVUID_Core_DrawState_InternalError, "%s caller not supported.", caller_name);
        }
    }
    // offset needs to be a multiple of 4.
    if ((offset & 0x3) != 0) {
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            "VUID-VkPushConstantRange-offset-00295",
                            "%s call has push constants index %u with offset %u. Offset must be a multiple of 4.", caller_name,
                            index, offset);
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            "VUID-vkCmdPushConstants-offset-00368",
                            "%s call has push constants with offset %u. Offset must be a multiple of 4.", caller_name, offset);
        } else {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            kVUID_Core_DrawState_InternalError, "%s caller not supported.", caller_name);
        }
    }
    return skip;
}
// Buckets used when summing descriptor counts across set layouts; several
// VK_DESCRIPTOR_TYPE_* values fold into a single group (see
// GetDescriptorCountMaxPerStage). DSL_NUM_DESCRIPTOR_GROUPS doubles as the
// array size for per-group accumulators.
enum DSL_DESCRIPTOR_GROUPS {
    DSL_TYPE_SAMPLERS = 0,
    DSL_TYPE_UNIFORM_BUFFERS,
    DSL_TYPE_STORAGE_BUFFERS,
    DSL_TYPE_SAMPLED_IMAGES,
    DSL_TYPE_STORAGE_IMAGES,
    DSL_TYPE_INPUT_ATTACHMENTS,
    DSL_TYPE_INLINE_UNIFORM_BLOCK,
    DSL_NUM_DESCRIPTOR_GROUPS
};
// Used by PreCallValiateCreatePipelineLayout.
// Returns an array of size DSL_NUM_DESCRIPTOR_GROUPS of the maximum number of descriptors used in any single pipeline stage
std::valarray<uint32_t> GetDescriptorCountMaxPerStage(
const layer_data *dev_data, const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> set_layouts,
bool skip_update_after_bind) {
// Identify active pipeline stages
std::vector<VkShaderStageFlags> stage_flags = {VK_SHADER_STAGE_VERTEX_BIT, VK_SHADER_STAGE_FRAGMENT_BIT,
VK_SHADER_STAGE_COMPUTE_BIT};
if (dev_data->enabled_features.core.geometryShader) {
stage_flags.push_back(VK_SHADER_STAGE_GEOMETRY_BIT);
}
if (dev_data->enabled_features.core.tessellationShader) {
stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT);
}
// Allow iteration over enum values
std::vector<DSL_DESCRIPTOR_GROUPS> dsl_groups = {DSL_TYPE_SAMPLERS, DSL_TYPE_UNIFORM_BUFFERS, DSL_TYPE_STORAGE_BUFFERS,
DSL_TYPE_SAMPLED_IMAGES, DSL_TYPE_STORAGE_IMAGES, DSL_TYPE_INPUT_ATTACHMENTS,
DSL_TYPE_INLINE_UNIFORM_BLOCK};
// Sum by layouts per stage, then pick max of stages per type
std::valarray<uint32_t> max_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS); // max descriptor sum among all pipeline stages
for (auto stage : stage_flags) {
std::valarray<uint32_t> stage_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS); // per-stage sums
for (auto dsl : set_layouts) {
if (skip_update_after_bind &&
(dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT)) {
continue;
}
for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) {
const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
if (0 != (stage & binding->stageFlags)) {
switch (binding->descriptorType) {
case VK_DESCRIPTOR_TYPE_SAMPLER:
stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
stage_sum[DSL_TYPE_UNIFORM_BUFFERS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
stage_sum[DSL_TYPE_STORAGE_BUFFERS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
stage_sum[DSL_TYPE_STORAGE_IMAGES] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount;
stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
stage_sum[DSL_TYPE_INPUT_ATTACHMENTS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
// count one block per binding. descriptorCount is number of bytes
stage_sum[DSL_TYPE_INLINE_UNIFORM_BLOCK]++;
break;
default:
break;
}
}
}
}
for (auto type : dsl_groups) {
max_sum[type] = std::max(stage_sum[type], max_sum[type]);
}
}
return max_sum;
}
// Used by PreCallValidateCreatePipelineLayout.
// Returns a map indexed by VK_DESCRIPTOR_TYPE_* enum of the summed descriptors by type.
// Note: descriptors only count against the limit once even if used by multiple stages.
std::map<uint32_t, uint32_t> GetDescriptorSum(
const layer_data *dev_data, const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> &set_layouts,
bool skip_update_after_bind) {
std::map<uint32_t, uint32_t> sum_by_type;
for (auto dsl : set_layouts) {
if (skip_update_after_bind && (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT)) {
continue;
}
for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) {
const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
if (binding->descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
// count one block per binding. descriptorCount is number of bytes
sum_by_type[binding->descriptorType]++;
} else {
sum_by_type[binding->descriptorType] += binding->descriptorCount;
}
}
}
return sum_by_type;
}
static bool PreCallValiateCreatePipelineLayout(const layer_data *dev_data, const VkPipelineLayoutCreateInfo *pCreateInfo) {
bool skip = false;
// Validate layout count against device physical limit
if (pCreateInfo->setLayoutCount > dev_data->phys_dev_props.limits.maxBoundDescriptorSets) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-setLayoutCount-00286",
"vkCreatePipelineLayout(): setLayoutCount (%d) exceeds physical device maxBoundDescriptorSets limit (%d).",
pCreateInfo->setLayoutCount, dev_data->phys_dev_props.limits.maxBoundDescriptorSets);
}
// Validate Push Constant ranges
uint32_t i, j;
for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
skip |= ValidatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPushConstantRange-stageFlags-requiredbitmask",
"vkCreatePipelineLayout() call has no stageFlags set.");
}
}
// As of 1.0.28, there is a VU that states that a stage flag cannot appear more than once in the list of push constant ranges.
for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
if (0 != (pCreateInfo->pPushConstantRanges[i].stageFlags & pCreateInfo->pPushConstantRanges[j].stageFlags)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pPushConstantRanges-00292",
"vkCreatePipelineLayout() Duplicate stage flags found in ranges %d and %d.", i, j);
}
}
}
// Early-out
if (skip) return skip;
std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> set_layouts(pCreateInfo->setLayoutCount, nullptr);
unsigned int push_descriptor_set_count = 0;
{
unique_lock_t lock(global_lock); // Lock while accessing global state
for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
set_layouts[i] = GetDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
if (set_layouts[i]->IsPushDescriptor()) ++push_descriptor_set_count;
}
} // Unlock
if (push_descriptor_set_count > 1) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00293",
"vkCreatePipelineLayout() Multiple push descriptor sets found.");
}
// Max descriptors by type, within a single pipeline stage
std::valarray<uint32_t> max_descriptors_per_stage = GetDescriptorCountMaxPerStage(dev_data, set_layouts, true);
// Samplers
if (max_descriptors_per_stage[DSL_TYPE_SAMPLERS] > dev_data->phys_dev_props.limits.maxPerStageDescriptorSamplers) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00287",
"vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device "
"maxPerStageDescriptorSamplers limit (%d).",
max_descriptors_per_stage[DSL_TYPE_SAMPLERS], dev_data->phys_dev_props.limits.maxPerStageDescriptorSamplers);
}
// Uniform buffers
if (max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS] > dev_data->phys_dev_props.limits.maxPerStageDescriptorUniformBuffers) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00288",
"vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device "
"maxPerStageDescriptorUniformBuffers limit (%d).",
max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS],
dev_data->phys_dev_props.limits.maxPerStageDescriptorUniformBuffers);
}
// Storage buffers
if (max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS] > dev_data->phys_dev_props.limits.maxPerStageDescriptorStorageBuffers) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00289",
"vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device "
"maxPerStageDescriptorStorageBuffers limit (%d).",
max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS],
dev_data->phys_dev_props.limits.maxPerStageDescriptorStorageBuffers);
}
// Sampled images
if (max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES] > dev_data->phys_dev_props.limits.maxPerStageDescriptorSampledImages) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00290",
"vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device "
"maxPerStageDescriptorSampledImages limit (%d).",
max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES],
dev_data->phys_dev_props.limits.maxPerStageDescriptorSampledImages);
}
// Storage images
if (max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES] > dev_data->phys_dev_props.limits.maxPerStageDescriptorStorageImages) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00291",
"vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device "
"maxPerStageDescriptorStorageImages limit (%d).",
max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES],
dev_data->phys_dev_props.limits.maxPerStageDescriptorStorageImages);
}
// Input attachments
if (max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS] >
dev_data->phys_dev_props.limits.maxPerStageDescriptorInputAttachments) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01676",
"vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device "
"maxPerStageDescriptorInputAttachments limit (%d).",
max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS],
dev_data->phys_dev_props.limits.maxPerStageDescriptorInputAttachments);
}
// Inline uniform blocks
if (max_descriptors_per_stage[DSL_TYPE_INLINE_UNIFORM_BLOCK] >
dev_data->phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorInlineUniformBlocks) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-02214",
"vkCreatePipelineLayout(): max per-stage inline uniform block bindings count (%d) exceeds device "
"maxPerStageDescriptorInlineUniformBlocks limit (%d).",
max_descriptors_per_stage[DSL_TYPE_INLINE_UNIFORM_BLOCK],
dev_data->phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorInlineUniformBlocks);
}
// Total descriptors by type
//
std::map<uint32_t, uint32_t> sum_all_stages = GetDescriptorSum(dev_data, set_layouts, true);
// Samplers
uint32_t sum = sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLER] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER];
if (sum > dev_data->phys_dev_props.limits.maxDescriptorSetSamplers) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01677",
"vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device "
"maxDescriptorSetSamplers limit (%d).",
sum, dev_data->phys_dev_props.limits.maxDescriptorSetSamplers);
}
// Uniform buffers
if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] > dev_data->phys_dev_props.limits.maxDescriptorSetUniformBuffers) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01678",
"vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUniformBuffers limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER],
dev_data->phys_dev_props.limits.maxDescriptorSetUniformBuffers);
}
// Dynamic uniform buffers
if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] >
dev_data->phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01679",
"vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUniformBuffersDynamic limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC],
dev_data->phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic);
}
// Storage buffers
if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] > dev_data->phys_dev_props.limits.maxDescriptorSetStorageBuffers) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01680",
"vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetStorageBuffers limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER],
dev_data->phys_dev_props.limits.maxDescriptorSetStorageBuffers);
}
// Dynamic storage buffers
if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] >
dev_data->phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01681",
"vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetStorageBuffersDynamic limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC],
dev_data->phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic);
}
// Sampled images
sum = sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] +
sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER];
if (sum > dev_data->phys_dev_props.limits.maxDescriptorSetSampledImages) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01682",
"vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device "
"maxDescriptorSetSampledImages limit (%d).",
sum, dev_data->phys_dev_props.limits.maxDescriptorSetSampledImages);
}
// Storage images
sum = sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER];
if (sum > dev_data->phys_dev_props.limits.maxDescriptorSetStorageImages) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01683",
"vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device "
"maxDescriptorSetStorageImages limit (%d).",
sum, dev_data->phys_dev_props.limits.maxDescriptorSetStorageImages);
}
// Input attachments
if (sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] > dev_data->phys_dev_props.limits.maxDescriptorSetInputAttachments) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01684",
"vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device "
"maxDescriptorSetInputAttachments limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT],
dev_data->phys_dev_props.limits.maxDescriptorSetInputAttachments);
}
// Inline uniform blocks
if (sum_all_stages[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT] > dev_data->phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetInlineUniformBlocks) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-02216",
"vkCreatePipelineLayout(): sum of inline uniform block bindings among all stages (%d) exceeds device "
"maxDescriptorSetInlineUniformBlocks limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT],
dev_data->phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetInlineUniformBlocks);
}
if (dev_data->extensions.vk_ext_descriptor_indexing) {
// XXX TODO: replace with correct VU messages
// Max descriptors by type, within a single pipeline stage
std::valarray<uint32_t> max_descriptors_per_stage_update_after_bind =
GetDescriptorCountMaxPerStage(dev_data, set_layouts, false);
// Samplers
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLERS] >
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSamplers) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-03022",
"vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindSamplers limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLERS],
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSamplers);
}
// Uniform buffers
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_UNIFORM_BUFFERS] >
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindUniformBuffers) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-03023",
"vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindUniformBuffers limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_UNIFORM_BUFFERS],
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindUniformBuffers);
}
// Storage buffers
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_BUFFERS] >
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageBuffers) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-03024",
"vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindStorageBuffers limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_BUFFERS],
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageBuffers);
}
// Sampled images
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLED_IMAGES] >
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSampledImages) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-03025",
"vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindSampledImages limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLED_IMAGES],
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSampledImages);
}
// Storage images
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_IMAGES] >
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageImages) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-03026",
"vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindStorageImages limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_IMAGES],
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageImages);
}
// Input attachments
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_INPUT_ATTACHMENTS] >
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindInputAttachments) {
skip |= log_msg(
dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-03027",
"vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindInputAttachments limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_INPUT_ATTACHMENTS],
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindInputAttachments);
}
// Inline uniform blocks
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_INLINE_UNIFORM_BLOCK] >
dev_data->phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks) {
skip |= log_msg(
dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-02215",
"vkCreatePipelineLayout(): max per-stage inline uniform block bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_INLINE_UNIFORM_BLOCK],
dev_data->phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks);
}
// Total descriptors by type, summed across all pipeline stages
//
std::map<uint32_t, uint32_t> sum_all_stages_update_after_bind = GetDescriptorSum(dev_data, set_layouts, false);
// Samplers
sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_SAMPLER] +
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER];
if (sum > dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSamplers) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03036",
"vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindSamplers limit (%d).",
sum, dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSamplers);
}
// Uniform buffers
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] >
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffers) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03037",
"vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindUniformBuffers limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER],
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffers);
}
// Dynamic uniform buffers
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] >
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic) {
skip |= log_msg(
dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03038",
"vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindUniformBuffersDynamic limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC],
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic);
}
// Storage buffers
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] >
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffers) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03039",
"vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindStorageBuffers limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER],
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffers);
}
// Dynamic storage buffers
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] >
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic) {
skip |= log_msg(
dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03040",
"vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindStorageBuffersDynamic limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC],
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic);
}
// Sampled images
sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] +
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] +
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER];
if (sum > dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSampledImages) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03041",
"vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindSampledImages limit (%d).",
sum, dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSampledImages);
}
// Storage images
sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] +
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER];
if (sum > dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageImages) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03042",
"vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindStorageImages limit (%d).",
sum, dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageImages);
}
// Input attachments
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] >
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindInputAttachments) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03043",
"vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindInputAttachments limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT],
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindInputAttachments);
}
// Inline uniform blocks
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT] >
dev_data->phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetUpdateAfterBindInlineUniformBlocks) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-02217",
"vkCreatePipelineLayout(): sum of inline uniform block bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindInlineUniformBlocks limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT],
dev_data->phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetUpdateAfterBindInlineUniformBlocks);
}
}
return skip;
}
// For repeatable sorting, not very useful for "memory in range" search
struct PushConstantRangeCompare {
    // Strict-weak ordering over push-constant ranges. Primary key is offset,
    // ties broken by size, then stageFlags, so ranges differing in ANY field
    // never compare equivalent (avoids false aliasing in the interning set).
    bool operator()(const VkPushConstantRange *lhs, const VkPushConstantRange *rhs) const {
        if (lhs->offset != rhs->offset) return lhs->offset < rhs->offset;
        // Equal offsets: sorting by size (i.e. end of range) is useful for validation.
        if (lhs->size != rhs->size) return lhs->size < rhs->size;
        // Arbitrary final tie-break over the one remaining field.
        return lhs->stageFlags < rhs->stageFlags;
    }
};
// Process-wide dictionary interning canonical push-constant range lists;
// look_up() hands back a shared PushConstantRangesId so that equivalent
// range sets (including the empty set) map to the same id.
static PushConstantRangesDict push_constant_ranges_dict;
// Return the canonical id for the push-constant ranges of a pipeline layout
// create info. The input ranges are sorted first so that equivalent (possibly
// re-ordered) range lists intern to the same id, making later "compatible for
// set" checks a cheap id comparison.
PushConstantRangesId GetCanonicalId(const VkPipelineLayoutCreateInfo *info) {
    if (!info->pPushConstantRanges) {
        // Hand back the empty entry (creating as needed)...
        return push_constant_ranges_dict.look_up(PushConstantRanges());
    }
    // Sort the input ranges to ensure equivalent ranges map to the same id
    std::set<const VkPushConstantRange *, PushConstantRangeCompare> sorted;
    for (uint32_t i = 0; i < info->pushConstantRangeCount; i++) {
        sorted.insert(info->pPushConstantRanges + i);
    }
    // BUGFIX: the previous code constructed the vector with sorted.size()
    // default-initialized elements and then appended, producing a canonical
    // form of 2N entries (N of them zero-filled). Reserve instead so the
    // canonical form contains exactly the input ranges.
    PushConstantRanges ranges;
    ranges.reserve(sorted.size());
    for (const auto *range : sorted) {
        ranges.emplace_back(*range);
    }
    return push_constant_ranges_dict.look_up(std::move(ranges));
}
// Dictionary interning the canonical form of a pipeline layout's list of descriptor set layouts
static PipelineLayoutSetLayoutsDict pipeline_layout_set_layouts_dict;
// Dictionary interning the canonical form of the "compatible for set" records
static PipelineLayoutCompatDict pipeline_layout_compat_dict;
// Intern a "compatible for set N" record: equal (set index, push-constant-ranges
// id, set-layouts id) triples share a single canonical PipelineLayoutCompatId,
// so pipeline-layout compatibility reduces to an id comparison.
static PipelineLayoutCompatId GetCanonicalId(const uint32_t set_index, const PushConstantRangesId pcr_id,
                                             const PipelineLayoutSetLayoutsId set_layouts_id) {
    return pipeline_layout_compat_dict.look_up(PipelineLayoutCompatDef(set_index, pcr_id, set_layouts_id));
}
// Record layer state for a successfully created pipeline layout: capture the
// descriptor set layout states and precompute the canonical "compatible for
// set N" ids used for cheap pipeline-layout compatibility checks later.
static void PostCallRecordCreatePipelineLayout(layer_data *dev_data, const VkPipelineLayoutCreateInfo *pCreateInfo,
                                               const VkPipelineLayout *pPipelineLayout) {
    unique_lock_t lock(global_lock);  // Lock while accessing state
    PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
    plNode.layout = *pPipelineLayout;
    plNode.set_layouts.resize(pCreateInfo->setLayoutCount);
    PipelineLayoutSetLayoutsDef set_layouts(pCreateInfo->setLayoutCount);
    for (uint32_t i = 0; i < pCreateInfo->setLayoutCount; ++i) {
        plNode.set_layouts[i] = GetDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
        set_layouts[i] = plNode.set_layouts[i]->GetLayoutId();
    }
    // Get canonical form IDs for the "compatible for set" contents
    plNode.push_constant_ranges = GetCanonicalId(pCreateInfo);
    auto set_layouts_id = pipeline_layout_set_layouts_dict.look_up(set_layouts);
    plNode.compat_for_set.reserve(pCreateInfo->setLayoutCount);
    // Create table of "compatible for set N" canonical forms for trivial accept validation
    for (uint32_t i = 0; i < pCreateInfo->setLayoutCount; ++i) {
        plNode.compat_for_set.emplace_back(GetCanonicalId(i, plNode.push_constant_ranges, set_layouts_id));
    }
    // Implicit unlock at end of scope. (Stray ';' after the function body removed.)
}
// vkCreatePipelineLayout entry point: validate descriptor limits up front,
// then call down the chain and record layout state on success.
VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
                                                    const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // NOTE(review): "Valiate" typo is in the helper's definition elsewhere in this file;
    // the call site must match it.
    if (PreCallValiateCreatePipelineLayout(dev_data, pCreateInfo)) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    const VkResult result = dev_data->dispatch_table.CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
    if (result == VK_SUCCESS) {
        PostCallRecordCreatePipelineLayout(dev_data, pCreateInfo, pPipelineLayout);
    }
    return result;
}
// Report an out-of-memory failure while allocating the layer-side pool state.
// Returns the log_msg result (true when the callback requests the call be skipped).
static bool PostCallValidateCreateDescriptorPool(layer_data *dev_data, VkDescriptorPool *pDescriptorPool) {
    const uint64_t pool_handle = HandleToUint64(*pDescriptorPool);
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                   pool_handle, kVUID_Core_DrawState_OutOfMemory,
                   "Out of memory while attempting to allocate DESCRIPTOR_POOL_STATE in vkCreateDescriptorPool()");
}
// Record the layer-side state object for a newly created descriptor pool so
// later calls can retrieve it (e.g. via GetDescriptorPoolState()).
static void PostCallRecordCreateDescriptorPool(layer_data *dev_data, DESCRIPTOR_POOL_STATE *pNewNode,
                                               VkDescriptorPool *pDescriptorPool) {
    dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
}
// vkCreateDescriptorPool entry point: call down the chain first, then (on
// success) allocate and record the layer-side DESCRIPTOR_POOL_STATE.
VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo,
                                                    const VkAllocationCallbacks *pAllocator, VkDescriptorPool *pDescriptorPool) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
    if (VK_SUCCESS == result) {
        DESCRIPTOR_POOL_STATE *pNewNode = new DESCRIPTOR_POOL_STATE(*pDescriptorPool, pCreateInfo);
        lock_guard_t lock(global_lock);
        // NOTE(review): this branch is dead code — plain 'new' throws std::bad_alloc
        // rather than returning NULL. Left in place because removing it alters the
        // (theoretical) validation-skip path; confirm before cleaning up.
        if (NULL == pNewNode) {
            bool skip = PostCallValidateCreateDescriptorPool(dev_data, pDescriptorPool);
            if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
        } else {
            PostCallRecordCreateDescriptorPool(dev_data, pNewNode, pDescriptorPool);
        }
    } else {
        // Need to do anything if pool create fails?
    }
    return result;
}
// Validate that given pool does not store any descriptor sets used by an in-flight CmdBuffer
// pool stores the descriptor sets to be validated
// Return false if no errors occur
// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
// Check that no descriptor set allocated from 'descriptorPool' is still in use
// by an in-flight command buffer. Returns true when a validation error was
// reported and the callback asked to skip the API call.
static bool PreCallValidateResetDescriptorPool(layer_data *dev_data, VkDescriptorPool descriptorPool) {
    if (dev_data->instance_data->disabled.idle_descriptor_set) return false;
    DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(dev_data, descriptorPool);
    if (pool_state == nullptr) return false;
    // One in-use set is enough to fail the reset; stop at the first error that sticks.
    for (auto *ds : pool_state->sets) {
        if (ds == nullptr || !ds->in_use.load()) continue;
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                    HandleToUint64(descriptorPool), "VUID-vkResetDescriptorPool-descriptorPool-00313",
                    "It is invalid to call vkResetDescriptorPool() with descriptor sets in use by a command buffer.")) {
            return true;
        }
    }
    return false;
}
// Update layer state after a successful vkResetDescriptorPool: free every
// descriptor set allocated from the pool and restore its per-type descriptor
// and set budgets back to the creation-time maximums.
static void PostCallRecordResetDescriptorPool(layer_data *dev_data, VkDevice device, VkDescriptorPool descriptorPool,
                                              VkDescriptorPoolResetFlags flags) {
    DESCRIPTOR_POOL_STATE *pPool = GetDescriptorPoolState(dev_data, descriptorPool);
    // Guard against an unknown pool handle — the validate counterpart null-checks
    // this lookup, but the original record path dereferenced unconditionally.
    if (pPool == nullptr) return;
    // TODO: validate flags
    // For every set off of this pool, clear it, remove from setMap, and free cvdescriptorset::DescriptorSet
    for (auto ds : pPool->sets) {
        FreeDescriptorSet(dev_data, ds);
    }
    pPool->sets.clear();
    // Reset available count for each type and available sets for this pool
    for (auto it = pPool->availableDescriptorTypeCount.begin(); it != pPool->availableDescriptorTypeCount.end(); ++it) {
        pPool->availableDescriptorTypeCount[it->first] = pPool->maxDescriptorTypeCount[it->first];
    }
    pPool->availableSets = pPool->maxSets;
}
// vkResetDescriptorPool entry point: refuse the reset if any of the pool's
// sets are still in flight, otherwise call down the chain and rebuild state.
VKAPI_ATTR VkResult VKAPI_CALL ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
                                                   VkDescriptorPoolResetFlags flags) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip;
    {
        // Make sure sets being destroyed are not currently in-use
        unique_lock_t lock(global_lock);
        skip = PreCallValidateResetDescriptorPool(dev_data, descriptorPool);
    }
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    const VkResult result = dev_data->dispatch_table.ResetDescriptorPool(device, descriptorPool, flags);
    if (result == VK_SUCCESS) {
        unique_lock_t lock(global_lock);
        PostCallRecordResetDescriptorPool(dev_data, device, descriptorPool, flags);
    }
    return result;
}
// Ensure the pool contains enough descriptors and descriptor sets to satisfy
// an allocation request. Fills common_data with the total number of descriptors of each type required,
// as well as DescriptorSetLayout ptrs used for later update.
// Validate a descriptor set allocation request. Common data (per-type
// descriptor totals plus layout pointers for later update) is refreshed
// unconditionally, even when this validation check is disabled.
static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                  cvdescriptorset::AllocateDescriptorSetsData *common_data) {
    cvdescriptorset::UpdateAllocateDescriptorSetsData(dev_data, pAllocateInfo, common_data);
    const bool validation_disabled = dev_data->instance_data->disabled.allocate_descriptor_sets;
    // All state checks for AllocateDescriptorSets are done in a single helper.
    return validation_disabled ? false
                               : cvdescriptorset::ValidateAllocateDescriptorSets(dev_data, pAllocateInfo, common_data);
}
// Allocation state was good and call down chain was made so update state based on allocating descriptor sets
// Allocation state was good and call down chain was made so update state based on allocating descriptor sets.
// Inserts the new sets into the pool's set list and the device-level setMap.
static void PostCallRecordAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                 VkDescriptorSet *pDescriptorSets,
                                                 const cvdescriptorset::AllocateDescriptorSetsData *common_data) {
    // All the updates are contained in a single cvdescriptorset function
    cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap,
                                                   &dev_data->setMap, dev_data);
}
// TODO: PostCallRecord routine is dependent on data generated in PreCallValidate -- needs to be moved out
// vkAllocateDescriptorSets entry point: validate pool capacity and layouts,
// call down the chain, then record the newly allocated sets on success.
VKAPI_ATTR VkResult VKAPI_CALL AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                      VkDescriptorSet *pDescriptorSets) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount);
    bool skip;
    {
        unique_lock_t lock(global_lock);
        skip = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data);
    }
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    const VkResult result = dev_data->dispatch_table.AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
    if (result == VK_SUCCESS) {
        unique_lock_t lock(global_lock);
        // PostCallRecord consumes data produced in PreCallValidate (see TODO above).
        PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data);
    }
    return result;
}
// Verify state before freeing DescriptorSets
static bool PreCallValidateFreeDescriptorSets(const layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
const VkDescriptorSet *descriptor_sets) {
if (dev_data->instance_data->disabled.free_descriptor_sets) return false;
bool skip = false;
// First make sure sets being destroyed are not currently in-use
for (uint32_t i = 0; i < count; ++i) {
if (descriptor_sets[i] != VK_NULL_HANDLE) {
skip |= ValidateIdleDescriptorSet(dev_data, descriptor_sets[i], "vkFreeDescriptorSets");
}
}
DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(dev_data, pool);
if (pool_state && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_state->createInfo.flags)) {
// Can't Free from a NON_FREE pool
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
HandleToUint64(pool), "VUID-vkFreeDescriptorSets-descriptorPool-00312",
"It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
"VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
}
return skip;
}
// Sets are being returned to the pool so update the pool state
// Sets are being returned to the pool, so update the pool state: credit the
// freed set count and each set's per-type descriptor counts back to the pool,
// then release the layer-side set objects.
static void PreCallRecordFreeDescriptorSets(layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
                                            const VkDescriptorSet *descriptor_sets) {
    DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(dev_data, pool);
    // Defensive null check, matching PreCallValidateFreeDescriptorSets; the
    // original dereferenced the lookup result unconditionally.
    if (pool_state == nullptr) return;
    // Update available descriptor sets in pool
    pool_state->availableSets += count;
    // For each freed descriptor add its resources back into the pool as available and remove from pool and setMap
    for (uint32_t i = 0; i < count; ++i) {
        if (descriptor_sets[i] != VK_NULL_HANDLE) {
            auto descriptor_set = dev_data->setMap[descriptor_sets[i]];
            uint32_t type_index = 0, descriptor_count = 0;
            for (uint32_t j = 0; j < descriptor_set->GetBindingCount(); ++j) {
                type_index = static_cast<uint32_t>(descriptor_set->GetTypeFromIndex(j));
                descriptor_count = descriptor_set->GetDescriptorCountFromIndex(j);
                pool_state->availableDescriptorTypeCount[type_index] += descriptor_count;
            }
            FreeDescriptorSet(dev_data, descriptor_set);
            pool_state->sets.erase(descriptor_set);
        }
    }
}
// vkFreeDescriptorSets entry point: reject the call if any set is in flight or
// the pool lacks the FREE bit; otherwise return the sets' resources to the pool
// state and call down the chain.
VKAPI_ATTR VkResult VKAPI_CALL FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
                                                  const VkDescriptorSet *pDescriptorSets) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // Make sure that no sets being destroyed are in-flight
    unique_lock_t lock(global_lock);
    if (PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets)) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    // A race here is invalid (descriptorPool should be externally sync'd), but code defensively against an invalid race
    PreCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
    lock.unlock();
    return dev_data->dispatch_table.FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
}
// TODO : This is a Proof-of-concept for core validation architecture
// Really we'll want to break out these functions to separate files but
// keeping it all together here to prove out design
// PreCallValidate* handles validating all of the state prior to calling down chain to UpdateDescriptorSets()
// Validate all state prior to calling down the chain to UpdateDescriptorSets().
// Returns true when a validation error was reported and the call should be skipped.
static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                                const VkCopyDescriptorSet *pDescriptorCopies) {
    if (dev_data->instance_data->disabled.update_descriptor_sets) return false;
    // UpdateDescriptorSets touches many DescriptorSets at once, so the per-set map
    // look-ups happen inside the cvdescriptorset helper rather than up front here.
    // The helper only validates; it performs no state updates.
    return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data, descriptorWriteCount, pDescriptorWrites,
                                                         descriptorCopyCount, pDescriptorCopies);
}
// PreCallRecord* handles recording the state updates for UpdateDescriptorSets(); it runs before the call
// down the chain (the original comment said "PostCallRecord*", which did not match this function's name/usage).
static void PreCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
                                              const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                              const VkCopyDescriptorSet *pDescriptorCopies) {
    // Delegate to the descriptor-set helper namespace, which applies each write/copy to the tracked set objects.
    cvdescriptorset::PerformUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                 pDescriptorCopies);
}
VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                                const VkCopyDescriptorSet *pDescriptorCopies) {
    // Only map look-up at top level is for device-level layer_data
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    const bool skip = PreCallValidateUpdateDescriptorSets(device_data, descriptorWriteCount, pDescriptorWrites,
                                                          descriptorCopyCount, pDescriptorCopies);
    if (skip) return;
    // Since UpdateDescriptorSets() is void, nothing to check prior to updating state & we can update before call down chain
    PreCallRecordUpdateDescriptorSets(device_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                      pDescriptorCopies);
    lock.unlock();
    device_data->dispatch_table.UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                     pDescriptorCopies);
}
static void PostCallRecordAllocateCommandBuffers(layer_data *dev_data, VkDevice device,
                                                 const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
    // Register each newly allocated command buffer: link it to its pool and create its
    // tracking node in the device-wide command buffer map.
    auto pool_node = GetCommandPoolNode(dev_data, pCreateInfo->commandPool);
    if (!pool_node) return;
    for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
        VkCommandBuffer cb = pCommandBuffer[i];
        // Add command buffer to its commandPool map
        pool_node->commandBuffers.insert(cb);
        // Create tracking state, register it, then reset it to a clean initial state.
        GLOBAL_CB_NODE *cb_node = new GLOBAL_CB_NODE;
        dev_data->commandBufferMap[cb] = cb_node;
        ResetCommandBufferState(dev_data, cb);
        cb_node->createInfo = *pCreateInfo;
        cb_node->device = device;
    }
}
VKAPI_ATTR VkResult VKAPI_CALL AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo,
                                                      VkCommandBuffer *pCommandBuffer) {
    // Call down the chain first; only track command buffers the driver actually allocated.
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    const VkResult result = device_data->dispatch_table.AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
    if (result == VK_SUCCESS) {
        // Record the new buffers under the global lock (released by RAII at scope exit).
        unique_lock_t lock(global_lock);
        PostCallRecordAllocateCommandBuffers(device_data, device, pCreateInfo, pCommandBuffer);
    }
    return result;
}
// Add bindings between the given cmd buffer & framebuffer and the framebuffer's children
static void AddFramebufferBinding(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, FRAMEBUFFER_STATE *fb_state) {
    // First bind the framebuffer object itself to the command buffer...
    AddCommandBufferBinding(&fb_state->cb_bindings, {HandleToUint64(fb_state->framebuffer), kVulkanObjectTypeFramebuffer},
                            cb_state);
    // ...then bind every attachment image view that can be resolved to tracked state.
    for (uint32_t i = 0; i < fb_state->createInfo.attachmentCount; ++i) {
        auto view_state = GetAttachmentImageViewState(dev_data, fb_state, i);
        if (view_state) {
            AddCommandBufferBindingImageView(dev_data, cb_state, view_state);
        }
    }
}
// Validate vkBeginCommandBuffer(): the buffer must not be in flight or already recording,
// secondary buffers must supply coherent inheritance info, and an implicit reset (beginning a
// RECORDED/INVALID_COMPLETE buffer) is only legal if the parent pool allows individual resets.
// Returns true if the call should be skipped.
static bool PreCallValidateBeginCommandBuffer(layer_data *dev_data, const GLOBAL_CB_NODE *cb_state,
                                              const VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
    assert(cb_state);
    bool skip = false;
    // A command buffer still referenced by a pending submission cannot be re-begun.
    if (cb_state->in_use.load()) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkBeginCommandBuffer-commandBuffer-00049",
                        "Calling vkBeginCommandBuffer() on active command buffer %" PRIx64
                        " before it has completed. You must check command buffer fence before this call.",
                        HandleToUint64(commandBuffer));
    }
    if (cb_state->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
        // Secondary Command Buffer
        const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
        if (!pInfo) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), "VUID-vkBeginCommandBuffer-commandBuffer-00051",
                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%" PRIx64 ") must have inheritance info.",
                            HandleToUint64(commandBuffer));
        } else {
            if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
                // When continuing a render pass, the inherited framebuffer (if given) must have
                // been created with a render pass compatible with the inherited one.
                assert(pInfo->renderPass);
                string errorString = "";
                auto framebuffer = GetFramebufferState(dev_data, pInfo->framebuffer);
                if (framebuffer) {
                    if (framebuffer->createInfo.renderPass != pInfo->renderPass) {
                        // renderPass that framebuffer was created with must be compatible with local renderPass
                        skip |=
                            ValidateRenderPassCompatibility(dev_data, "framebuffer", framebuffer->rp_state.get(), "command buffer",
                                                            GetRenderPassState(dev_data, pInfo->renderPass),
                                                            "vkBeginCommandBuffer()", "VUID-VkCommandBufferBeginInfo-flags-00055");
                    }
                }
            }
            // PRECISE_BIT occlusion queries require both occlusionQueryEnable and the
            // occlusionQueryPrecise device feature.
            if ((pInfo->occlusionQueryEnable == VK_FALSE || dev_data->enabled_features.core.occlusionQueryPrecise == VK_FALSE) &&
                (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), "VUID-vkBeginCommandBuffer-commandBuffer-00052",
                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%" PRIx64
                            ") must not have VK_QUERY_CONTROL_PRECISE_BIT if occulusionQuery is disabled or the device "
                            "does not support precise occlusion queries.",
                            HandleToUint64(commandBuffer));
            }
        }
        // Inherited subpass index must be within the inherited render pass's subpass count.
        if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
            auto renderPass = GetRenderPassState(dev_data, pInfo->renderPass);
            if (renderPass) {
                if (pInfo->subpass >= renderPass->createInfo.subpassCount) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer),
                                    "VUID-VkCommandBufferBeginInfo-flags-00054",
                                    "vkBeginCommandBuffer(): Secondary Command Buffers (0x%" PRIx64
                                    ") must have a subpass index (%d) that is less than the number of subpasses (%d).",
                                    HandleToUint64(commandBuffer), pInfo->subpass, renderPass->createInfo.subpassCount);
                }
            }
        }
    }
    // Begin on a buffer already in the RECORDING state is illegal; begin on a finished buffer
    // is an implicit reset, legal only when the pool has the RESET_COMMAND_BUFFER bit.
    if (CB_RECORDING == cb_state->state) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkBeginCommandBuffer-commandBuffer-00049",
                        "vkBeginCommandBuffer(): Cannot call Begin on command buffer (0x%" PRIx64
                        ") in the RECORDING state. Must first call vkEndCommandBuffer().",
                        HandleToUint64(commandBuffer));
    } else if (CB_RECORDED == cb_state->state || CB_INVALID_COMPLETE == cb_state->state) {
        VkCommandPool cmdPool = cb_state->createInfo.commandPool;
        auto pPool = GetCommandPoolNode(dev_data, cmdPool);
        if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), "VUID-vkBeginCommandBuffer-commandBuffer-00050",
                            "Call to vkBeginCommandBuffer() on command buffer (0x%" PRIx64
                            ") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIx64
                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
                            HandleToUint64(commandBuffer), HandleToUint64(cmdPool));
        }
    }
    return skip;
}
// Record state for vkBeginCommandBuffer(): clear stale memory references, perform the implicit
// reset when beginning a previously recorded buffer, snapshot the begin/inheritance info, and
// for secondary buffers continuing a render pass, adopt the inherited pass/subpass/framebuffer.
static void PreCallRecordBeginCommandBuffer(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const VkCommandBuffer commandBuffer,
                                            const VkCommandBufferBeginInfo *pBeginInfo) {
    assert(cb_state);
    // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
    ClearCmdBufAndMemReferences(dev_data, cb_state);
    if (cb_state->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
        // Secondary Command Buffer
        const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
        if (pInfo) {
            if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
                assert(pInfo->renderPass);
                auto framebuffer = GetFramebufferState(dev_data, pInfo->framebuffer);
                if (framebuffer) {
                    // Connect this framebuffer and its children to this cmdBuffer
                    AddFramebufferBinding(dev_data, cb_state, framebuffer);
                }
            }
        }
    }
    if (CB_RECORDED == cb_state->state || CB_INVALID_COMPLETE == cb_state->state) {
        ResetCommandBufferState(dev_data, commandBuffer);
    }
    // Set updated state here in case implicit reset occurs above
    cb_state->state = CB_RECORDING;
    cb_state->beginInfo = *pBeginInfo;
    if (cb_state->beginInfo.pInheritanceInfo) {
        // Deep-copy the inheritance info so the stored beginInfo doesn't dangle on the
        // caller's (possibly stack-allocated) struct.
        cb_state->inheritanceInfo = *(cb_state->beginInfo.pInheritanceInfo);
        cb_state->beginInfo.pInheritanceInfo = &cb_state->inheritanceInfo;
        // If we are a secondary command-buffer and inheriting. Update the items we should inherit.
        if ((cb_state->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
            (cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
            cb_state->activeRenderPass = GetRenderPassState(dev_data, cb_state->beginInfo.pInheritanceInfo->renderPass);
            cb_state->activeSubpass = cb_state->beginInfo.pInheritanceInfo->subpass;
            cb_state->activeFramebuffer = cb_state->beginInfo.pInheritanceInfo->framebuffer;
            cb_state->framebuffers.insert(cb_state->beginInfo.pInheritanceInfo->framebuffer);
        }
    }
}
VKAPI_ATTR VkResult VKAPI_CALL BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
    // Validate and record begin-state under the global lock, then call down the chain.
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = false;
    {
        unique_lock_t lock(global_lock);
        // Validate command buffer level
        GLOBAL_CB_NODE *cb_node = GetCBNode(device_data, commandBuffer);
        if (cb_node) {
            skip |= PreCallValidateBeginCommandBuffer(device_data, cb_node, commandBuffer, pBeginInfo);
            PreCallRecordBeginCommandBuffer(device_data, cb_node, commandBuffer, pBeginInfo);
        }
    }
    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    return device_data->dispatch_table.BeginCommandBuffer(commandBuffer, pBeginInfo);
}
// Validate vkEndCommandBuffer(): the buffer must not be inside a render pass (for primaries,
// or secondaries not continuing a pass) and must have no queries still active.
// Returns true if the call should be skipped.
static bool PreCallValidateEndCommandBuffer(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer) {
    bool skip = false;
    if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == cb_state->createInfo.level) ||
        !(cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
        // This needs spec clarification to update valid usage, see comments in PR:
        // https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/165
        skip |= InsideRenderPass(dev_data, cb_state, "vkEndCommandBuffer()", "VUID-vkEndCommandBuffer-commandBuffer-00060");
    }
    skip |= ValidateCmd(dev_data, cb_state, CMD_ENDCOMMANDBUFFER, "vkEndCommandBuffer()");
    // Any query begun in this buffer must have been ended before vkEndCommandBuffer().
    for (auto query : cb_state->activeQueries) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkEndCommandBuffer-commandBuffer-00061",
                        "Ending command buffer with in progress query: queryPool 0x%" PRIx64 ", index %d.",
                        HandleToUint64(query.pool), query.index);
    }
    return skip;
}
static void PostCallRecordEndCommandBuffer(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const VkResult &result) {
    // Cached validation is specific to a specific recording of a specific command buffer.
    for (auto ds : cb_state->validated_descriptor_sets) {
        ds->ClearCachedValidation(cb_state);
    }
    cb_state->validated_descriptor_sets.clear();
    // Only a successful end transitions the buffer to the RECORDED state.
    if (result == VK_SUCCESS) {
        cb_state->state = CB_RECORDED;
    }
}
VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *cb_node = GetCBNode(device_data, commandBuffer);
    bool skip = false;
    if (cb_node) {
        skip = PreCallValidateEndCommandBuffer(device_data, cb_node, commandBuffer);
    }
    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    // Call down the chain without holding the lock, then re-acquire it to record the result.
    lock.unlock();
    const VkResult result = device_data->dispatch_table.EndCommandBuffer(commandBuffer);
    lock.lock();
    if (cb_node) {
        PostCallRecordEndCommandBuffer(device_data, cb_node, result);
    }
    return result;
}
// Validate vkResetCommandBuffer(): the parent pool must have been created with the
// RESET_COMMAND_BUFFER bit, and the buffer must not be in flight.
// Returns true if the call should be skipped.
static bool PreCallValidateResetCommandBuffer(layer_data *dev_data, VkCommandBuffer commandBuffer) {
    bool skip = false;
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    // Robustness fix: GetCBNode()/GetCommandPoolNode() return null for handles we are not
    // tracking; the original dereferenced both results unconditionally and would crash on an
    // unknown/destroyed command buffer instead of reporting.
    if (!pCB) return skip;
    VkCommandPool cmdPool = pCB->createInfo.commandPool;
    auto pPool = GetCommandPoolNode(dev_data, cmdPool);
    if (pPool && !(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkResetCommandBuffer-commandBuffer-00046",
                        "Attempt to reset command buffer (0x%" PRIx64 ") created from command pool (0x%" PRIx64
                        ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
                        HandleToUint64(commandBuffer), HandleToUint64(cmdPool));
    }
    skip |= CheckCommandBufferInFlight(dev_data, pCB, "reset", "VUID-vkResetCommandBuffer-commandBuffer-00045");
    return skip;
}
// Reset our tracking state for this command buffer after a successful reset down the chain.
static void PostCallRecordResetCommandBuffer(layer_data *dev_data, VkCommandBuffer commandBuffer) {
    ResetCommandBufferState(dev_data, commandBuffer);
}
VKAPI_ATTR VkResult VKAPI_CALL ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    const bool skip = PreCallValidateResetCommandBuffer(device_data, commandBuffer);
    lock.unlock();
    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    const VkResult result = device_data->dispatch_table.ResetCommandBuffer(commandBuffer, flags);
    // Only reset our tracking state if the driver-side reset succeeded.
    if (result == VK_SUCCESS) {
        lock.lock();
        PostCallRecordResetCommandBuffer(device_data, commandBuffer);
        lock.unlock();
    }
    return result;
}
// Validate vkCmdBindPipeline(): the command buffer must come from a graphics- or
// compute-capable queue family and be in a recordable state. Returns true to skip the call.
static bool PreCallValidateCmdBindPipeline(layer_data *dev_data, GLOBAL_CB_NODE *cb_state) {
    bool skip = ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdBindPipeline()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      "VUID-vkCmdBindPipeline-commandBuffer-cmdpool");
    skip |= ValidateCmd(dev_data, cb_state, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
    // TODO: "VUID-vkCmdBindPipeline-pipelineBindPoint-00777" "VUID-vkCmdBindPipeline-pipelineBindPoint-00779" -- using
    // ValidatePipelineBindPoint
    return skip;
}
// Record state for vkCmdBindPipeline(): for graphics binds, recompute which dynamic-state
// status bits are satisfied statically by the new pipeline; then track the binding.
static void PreCallRecordCmdBindPipeline(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint pipelineBindPoint,
                                         VkPipeline pipeline) {
    auto pipe_state = GetPipelineState(dev_data, pipeline);
    if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
        // Drop status bits owned by the previous pipeline's static state, then set the bits
        // that the newly bound pipeline fixes statically (i.e. not listed as dynamic state).
        cb_state->status &= ~cb_state->static_status;
        cb_state->static_status = MakeStaticStateMask(pipe_state->graphicsPipelineCI.ptr()->pDynamicState);
        cb_state->status |= cb_state->static_status;
    }
    cb_state->lastBound[pipelineBindPoint].pipeline_state = pipe_state;
    SetPipelineState(pipe_state);
    AddCommandBufferBinding(&pipe_state->cb_bindings, {HandleToUint64(pipeline), kVulkanObjectTypePipeline}, cb_state);
}
VKAPI_ATTR void VKAPI_CALL CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
                                           VkPipeline pipeline) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = false;
    {
        unique_lock_t lock(global_lock);
        GLOBAL_CB_NODE *cb_node = GetCBNode(device_data, commandBuffer);
        if (cb_node) {
            skip |= PreCallValidateCmdBindPipeline(device_data, cb_node);
            // NOTE: state is recorded even when validation failed, matching long-standing behavior.
            PreCallRecordCmdBindPipeline(device_data, cb_node, pipelineBindPoint, pipeline);
        }
    }
    if (!skip) device_data->dispatch_table.CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
}
// Validate vkCmdSetViewport(): graphics queue + recordable state, and the bound pipeline
// must have declared VK_DYNAMIC_STATE_VIEWPORT (otherwise viewport is static state).
static bool PreCallValidateCmdSetViewport(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer) {
    bool skip = ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdSetViewport()", VK_QUEUE_GRAPHICS_BIT,
                                      "VUID-vkCmdSetViewport-commandBuffer-cmdpool");
    skip |= ValidateCmd(dev_data, cb_state, CMD_SETVIEWPORT, "vkCmdSetViewport()");
    // static_status holds the state fixed statically by the pipeline; a set bit here means
    // the viewport was NOT declared dynamic, so this command is illegal.
    if (cb_state->static_status & CBSTATUS_VIEWPORT_SET) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdSetViewport-None-01221",
                        "vkCmdSetViewport(): pipeline was created without VK_DYNAMIC_STATE_VIEWPORT flag..");
    }
    return skip;
}
static void PreCallRecordCmdSetViewport(GLOBAL_CB_NODE *cb_state, uint32_t firstViewport, uint32_t viewportCount) {
    // Mark viewports [firstViewport, firstViewport + viewportCount) as set on this buffer.
    const uint32_t set_mask = ((1u << viewportCount) - 1u) << firstViewport;
    cb_state->viewportMask |= set_mask;
    cb_state->status |= CBSTATUS_VIEWPORT_SET;
}
VKAPI_ATTR void VKAPI_CALL CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
                                          const VkViewport *pViewports) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = false;
    {
        unique_lock_t lock(global_lock);
        GLOBAL_CB_NODE *cb_node = GetCBNode(device_data, commandBuffer);
        if (cb_node) {
            skip = PreCallValidateCmdSetViewport(device_data, cb_node, commandBuffer);
            if (!skip) PreCallRecordCmdSetViewport(cb_node, firstViewport, viewportCount);
        }
    }
    if (!skip) device_data->dispatch_table.CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
}
// Validate vkCmdSetScissor(): graphics queue + recordable state, and the bound pipeline
// must have declared VK_DYNAMIC_STATE_SCISSOR.
static bool PreCallValidateCmdSetScissor(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer) {
    bool skip = ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdSetScissor()", VK_QUEUE_GRAPHICS_BIT,
                                      "VUID-vkCmdSetScissor-commandBuffer-cmdpool");
    skip |= ValidateCmd(dev_data, cb_state, CMD_SETSCISSOR, "vkCmdSetScissor()");
    // A set bit in static_status means scissor state is fixed by the pipeline (not dynamic).
    if (cb_state->static_status & CBSTATUS_SCISSOR_SET) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdSetScissor-None-00590",
                        "vkCmdSetScissor(): pipeline was created without VK_DYNAMIC_STATE_SCISSOR flag..");
    }
    return skip;
}
static void PreCallRecordCmdSetScissor(GLOBAL_CB_NODE *cb_state, uint32_t firstScissor, uint32_t scissorCount) {
    // Mark scissors [firstScissor, firstScissor + scissorCount) as set on this buffer.
    const uint32_t set_mask = ((1u << scissorCount) - 1u) << firstScissor;
    cb_state->scissorMask |= set_mask;
    cb_state->status |= CBSTATUS_SCISSOR_SET;
}
VKAPI_ATTR void VKAPI_CALL CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
                                         const VkRect2D *pScissors) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = false;
    {
        unique_lock_t lock(global_lock);
        GLOBAL_CB_NODE *cb_node = GetCBNode(device_data, commandBuffer);
        if (cb_node) {
            skip = PreCallValidateCmdSetScissor(device_data, cb_node, commandBuffer);
            if (!skip) PreCallRecordCmdSetScissor(cb_node, firstScissor, scissorCount);
        }
    }
    if (!skip) device_data->dispatch_table.CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
}
// Validate vkCmdSetExclusiveScissorNV(): graphics queue + recordable state, the pipeline must
// have declared VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV, and the exclusiveScissor device
// feature must be enabled.
static bool PreCallValidateCmdSetExclusiveScissorNV(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer) {
    bool skip = ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdSetExclusiveScissorNV()", VK_QUEUE_GRAPHICS_BIT,
                                      "VUID-vkCmdSetExclusiveScissorNV-commandBuffer-cmdpool");
    skip |= ValidateCmd(dev_data, cb_state, CMD_SETEXCLUSIVESCISSOR, "vkCmdSetExclusiveScissorNV()");
    if (cb_state->static_status & CBSTATUS_EXCLUSIVE_SCISSOR_SET) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdSetExclusiveScissorNV-None-02032",
                        "vkCmdSetExclusiveScissorNV(): pipeline was created without VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV flag.");
    }
    if (!GetEnabledFeatures(dev_data)->exclusive_scissor.exclusiveScissor) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdSetExclusiveScissorNV-None-02031",
                        "vkCmdSetExclusiveScissorNV: The exclusiveScissor feature is disabled.");
    }
    return skip;
}
// Record state for vkCmdSetExclusiveScissorNV(): mark exclusive-scissor state as set.
// firstExclusiveScissor/exclusiveScissorCount are currently unused (see XXX below).
static void PreCallRecordCmdSetExclusiveScissorNV(GLOBAL_CB_NODE *cb_state, uint32_t firstExclusiveScissor, uint32_t exclusiveScissorCount) {
    // XXX TODO: We don't have VUIDs for validating that all exclusive scissors have been set.
    // cb_state->exclusiveScissorMask |= ((1u << exclusiveScissorCount) - 1u) << firstExclusiveScissor;
    cb_state->status |= CBSTATUS_EXCLUSIVE_SCISSOR_SET;
}
VKAPI_ATTR void VKAPI_CALL CmdSetExclusiveScissorNV(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor, uint32_t exclusiveScissorCount,
                                                    const VkRect2D *pExclusiveScissors) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = false;
    {
        unique_lock_t lock(global_lock);
        GLOBAL_CB_NODE *cb_node = GetCBNode(device_data, commandBuffer);
        if (cb_node) {
            skip = PreCallValidateCmdSetExclusiveScissorNV(device_data, cb_node, commandBuffer);
            if (!skip) PreCallRecordCmdSetExclusiveScissorNV(cb_node, firstExclusiveScissor, exclusiveScissorCount);
        }
    }
    if (!skip) {
        device_data->dispatch_table.CmdSetExclusiveScissorNV(commandBuffer, firstExclusiveScissor, exclusiveScissorCount,
                                                             pExclusiveScissors);
    }
}
// Validate vkCmdBindShadingRateImageNV(): graphics queue + recordable state, the
// shadingRateImage feature must be enabled, and — when a non-null view is bound — the view
// must be a 2D/2D_ARRAY R8_UINT view of an image created with SHADING_RATE_IMAGE usage in the
// expected layout. Returns true if the call should be skipped.
static bool PreCallValidateCmdBindShadingRateImageNV(layer_data *dev_data, GLOBAL_CB_NODE *cb_state,
                                                     VkCommandBuffer commandBuffer, VkImageView imageView, VkImageLayout imageLayout)
{
    bool skip = ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdBindShadingRateImageNV()", VK_QUEUE_GRAPHICS_BIT,
                                      "VUID-vkCmdBindShadingRateImageNV-commandBuffer-cmdpool");
    skip |= ValidateCmd(dev_data, cb_state, CMD_BINDSHADINGRATEIMAGE, "vkCmdBindShadingRateImageNV()");
    if (!GetEnabledFeatures(dev_data)->shading_rate_image.shadingRateImage) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdBindShadingRateImageNV-None-02058",
                        "vkCmdBindShadingRateImageNV: The shadingRateImage feature is disabled.");
    }
    if (imageView != VK_NULL_HANDLE) {
        auto view_state = GetImageViewState(dev_data, imageView);
        // BUGFIX: the original bound a reference through view_state (`auto &ivci =
        // view_state->create_info;`) BEFORE the null check below, which is undefined behavior
        // when the view is untracked/destroyed. All create_info accesses are now guarded.
        if (!view_state || (view_state->create_info.viewType != VK_IMAGE_VIEW_TYPE_2D &&
                            view_state->create_info.viewType != VK_IMAGE_VIEW_TYPE_2D_ARRAY)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
                            HandleToUint64(imageView), "VUID-vkCmdBindShadingRateImageNV-imageView-02059",
                            "vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, it must be a valid "
                            "VkImageView handle of type VK_IMAGE_VIEW_TYPE_2D or VK_IMAGE_VIEW_TYPE_2D_ARRAY.");
        }
        if (view_state && view_state->create_info.format != VK_FORMAT_R8_UINT) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
                            HandleToUint64(imageView), "VUID-vkCmdBindShadingRateImageNV-imageView-02060",
                            "vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, it must have a format of VK_FORMAT_R8_UINT.");
        }
        // BUGFIX: also guard the GetImageState() result before taking &...->createInfo; the
        // original dereferenced it unconditionally when view_state was non-null.
        IMAGE_STATE *image_state = view_state ? GetImageState(dev_data, view_state->create_info.image) : nullptr;
        const VkImageCreateInfo *ici = image_state ? &image_state->createInfo : nullptr;
        if (ici && !(ici->usage & VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
                            HandleToUint64(imageView), "VUID-vkCmdBindShadingRateImageNV-imageView-02061",
                            "vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, the image must have been "
                            "created with VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV set.");
        }
        if (view_state) {
            bool hit_error = false;
            // XXX TODO: While the VUID says "each subresource", only the base mip level is
            // actually used. Since we don't have an existing convenience function to iterate
            // over all mip levels, just don't bother with non-base levels.
            VkImageSubresourceRange &range = view_state->create_info.subresourceRange;
            VkImageSubresourceLayers subresource = { range.aspectMask, range.baseMipLevel, range.baseArrayLayer, range.layerCount };
            if (image_state) {
                // BUGFIX: the caller name passed to VerifyImageLayout was "vkCmdCopyImage()"
                // (copy-paste error); error messages now report the correct entry point.
                skip |= VerifyImageLayout(dev_data, cb_state, image_state, subresource, imageLayout,
                                          VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV, "vkCmdBindShadingRateImageNV()",
                                          "VUID-vkCmdBindShadingRateImageNV-imageLayout-02063",
                                          "VUID-vkCmdBindShadingRateImageNV-imageView-02062", &hit_error);
            }
        }
    }
    return skip;
}
static void PreCallRecordCmdBindShadingRateImageNV(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkImageView imageView) {
    // A null handle unbinds the shading-rate image; nothing to track in that case.
    if (imageView == VK_NULL_HANDLE) return;
    auto view_state = GetImageViewState(dev_data, imageView);
    AddCommandBufferBindingImageView(dev_data, cb_state, view_state);
}
VKAPI_ATTR void VKAPI_CALL CmdBindShadingRateImageNV(VkCommandBuffer commandBuffer,
                                                     VkImageView imageView,
                                                     VkImageLayout imageLayout) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = false;
    {
        unique_lock_t lock(global_lock);
        GLOBAL_CB_NODE *cb_node = GetCBNode(device_data, commandBuffer);
        if (cb_node) {
            skip = PreCallValidateCmdBindShadingRateImageNV(device_data, cb_node, commandBuffer, imageView, imageLayout);
            if (!skip) PreCallRecordCmdBindShadingRateImageNV(device_data, cb_node, imageView);
        }
    }
    if (!skip) device_data->dispatch_table.CmdBindShadingRateImageNV(commandBuffer, imageView, imageLayout);
}
// Validate vkCmdSetViewportShadingRatePaletteNV(): graphics queue + recordable state, the
// shadingRateImage feature must be enabled, the pipeline must declare the matching dynamic
// state, and every palette's entry count must be in [1, shadingRatePaletteSize].
static bool PreCallValidateCmdSetViewportShadingRatePaletteNV(layer_data *dev_data, GLOBAL_CB_NODE *cb_state,
    VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkShadingRatePaletteNV* pShadingRatePalettes)
{
    bool skip = ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdSetViewportShadingRatePaletteNV()", VK_QUEUE_GRAPHICS_BIT,
                                      "VUID-vkCmdSetViewportShadingRatePaletteNV-commandBuffer-cmdpool");
    skip |= ValidateCmd(dev_data, cb_state, CMD_SETVIEWPORTSHADINGRATEPALETTE, "vkCmdSetViewportShadingRatePaletteNV()");
    if (!GetEnabledFeatures(dev_data)->shading_rate_image.shadingRateImage) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdSetViewportShadingRatePaletteNV-None-02064",
                        "vkCmdSetViewportShadingRatePaletteNV: The shadingRateImage feature is disabled.");
    }
    if (cb_state->static_status & CBSTATUS_SHADING_RATE_PALETTE_SET) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdSetViewportShadingRatePaletteNV-None-02065",
                        "vkCmdSetViewportShadingRatePaletteNV(): pipeline was created without VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV flag.");
    }
    // Per-palette bounds check against the device's reported palette size limit.
    for (uint32_t i = 0; i < viewportCount; ++i) {
        auto *palette = &pShadingRatePalettes[i];
        if (palette->shadingRatePaletteEntryCount == 0 ||
            palette->shadingRatePaletteEntryCount > dev_data->phys_dev_ext_props.shading_rate_image_props.shadingRatePaletteSize) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), "VUID-VkShadingRatePaletteNV-shadingRatePaletteEntryCount-02071",
                            "vkCmdSetViewportShadingRatePaletteNV: shadingRatePaletteEntryCount must be between 1 and shadingRatePaletteSize.");
        }
    }
    return skip;
}
// Record state for vkCmdSetViewportShadingRatePaletteNV(): mark palette state as set.
// firstViewport/viewportCount are currently unused (see XXX below).
static void PreCallRecordCmdSetViewportShadingRatePaletteNV(GLOBAL_CB_NODE *cb_state, uint32_t firstViewport, uint32_t viewportCount) {
    // XXX TODO: We don't have VUIDs for validating that all shading rate palettes have been set.
    // cb_state->shadingRatePaletteMask |= ((1u << viewportCount) - 1u) << firstViewport;
    cb_state->status |= CBSTATUS_SHADING_RATE_PALETTE_SET;
}
VKAPI_ATTR void VKAPI_CALL CmdSetViewportShadingRatePaletteNV(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
                                                              const VkShadingRatePaletteNV* pShadingRatePalettes) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = false;
    {
        unique_lock_t lock(global_lock);
        GLOBAL_CB_NODE *cb_node = GetCBNode(device_data, commandBuffer);
        if (cb_node) {
            skip = PreCallValidateCmdSetViewportShadingRatePaletteNV(device_data, cb_node, commandBuffer, firstViewport,
                                                                     viewportCount, pShadingRatePalettes);
            if (!skip) PreCallRecordCmdSetViewportShadingRatePaletteNV(cb_node, firstViewport, viewportCount);
        }
    }
    if (!skip) {
        device_data->dispatch_table.CmdSetViewportShadingRatePaletteNV(commandBuffer, firstViewport, viewportCount,
                                                                       pShadingRatePalettes);
    }
}
// Validate vkCmdSetLineWidth(): graphics queue + recordable state, and the bound pipeline
// must have declared VK_DYNAMIC_STATE_LINE_WIDTH.
static bool PreCallValidateCmdSetLineWidth(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer) {
    bool skip = ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdSetLineWidth()", VK_QUEUE_GRAPHICS_BIT,
                                      "VUID-vkCmdSetLineWidth-commandBuffer-cmdpool");
    skip |= ValidateCmd(dev_data, cb_state, CMD_SETLINEWIDTH, "vkCmdSetLineWidth()");
    // A set bit in static_status means line width is fixed by the pipeline (not dynamic).
    if (cb_state->static_status & CBSTATUS_LINE_WIDTH_SET) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdSetLineWidth-None-00787",
                        "vkCmdSetLineWidth called but pipeline was created without VK_DYNAMIC_STATE_LINE_WIDTH flag.");
    }
    return skip;
}
// Record state for vkCmdSetLineWidth(): mark dynamic line-width state as set.
static void PreCallRecordCmdSetLineWidth(GLOBAL_CB_NODE *cb_state) { cb_state->status |= CBSTATUS_LINE_WIDTH_SET; }
VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = false;
    {
        unique_lock_t lock(global_lock);
        GLOBAL_CB_NODE *cb_node = GetCBNode(device_data, commandBuffer);
        if (cb_node) {
            skip = PreCallValidateCmdSetLineWidth(device_data, cb_node, commandBuffer);
            if (!skip) PreCallRecordCmdSetLineWidth(cb_node);
        }
    }
    if (!skip) device_data->dispatch_table.CmdSetLineWidth(commandBuffer, lineWidth);
}
// Validate vkCmdSetDepthBias(): graphics queue + recordable state, the bound pipeline must
// have declared VK_DYNAMIC_STATE_DEPTH_BIAS, and a nonzero clamp requires the depthBiasClamp
// device feature.
static bool PreCallValidateCmdSetDepthBias(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer,
                                           float depthBiasClamp) {
    bool skip = ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdSetDepthBias()", VK_QUEUE_GRAPHICS_BIT,
                                      "VUID-vkCmdSetDepthBias-commandBuffer-cmdpool");
    skip |= ValidateCmd(dev_data, cb_state, CMD_SETDEPTHBIAS, "vkCmdSetDepthBias()");
    if (cb_state->static_status & CBSTATUS_DEPTH_BIAS_SET) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdSetDepthBias-None-00789",
                        "vkCmdSetDepthBias(): pipeline was created without VK_DYNAMIC_STATE_DEPTH_BIAS flag..");
    }
    // depthBiasClamp must be 0.0 unless the corresponding device feature is enabled.
    if ((depthBiasClamp != 0.0) && (!dev_data->enabled_features.core.depthBiasClamp)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdSetDepthBias-depthBiasClamp-00790",
                        "vkCmdSetDepthBias(): the depthBiasClamp device feature is disabled: the depthBiasClamp parameter must "
                        "be set to 0.0.");
    }
    return skip;
}
// Record state for vkCmdSetDepthBias(): mark dynamic depth-bias state as set.
static void PreCallRecordCmdSetDepthBias(GLOBAL_CB_NODE *cb_state) { cb_state->status |= CBSTATUS_DEPTH_BIAS_SET; }
VKAPI_ATTR void VKAPI_CALL CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp,
                                           float depthBiasSlopeFactor) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = false;
    {
        unique_lock_t lock(global_lock);
        GLOBAL_CB_NODE *cb_node = GetCBNode(device_data, commandBuffer);
        if (cb_node) {
            skip = PreCallValidateCmdSetDepthBias(device_data, cb_node, commandBuffer, depthBiasClamp);
            if (!skip) PreCallRecordCmdSetDepthBias(cb_node);
        }
    }
    if (!skip) {
        device_data->dispatch_table.CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
    }
}
// Validate vkCmdSetBlendConstants(): graphics queue + recordable state, and the bound
// pipeline must have declared VK_DYNAMIC_STATE_BLEND_CONSTANTS.
static bool PreCallValidateCmdSetBlendConstants(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer) {
    bool skip = ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdSetBlendConstants()", VK_QUEUE_GRAPHICS_BIT,
                                      "VUID-vkCmdSetBlendConstants-commandBuffer-cmdpool");
    skip |= ValidateCmd(dev_data, cb_state, CMD_SETBLENDCONSTANTS, "vkCmdSetBlendConstants()");
    // A set bit in static_status means blend constants are fixed by the pipeline (not dynamic).
    if (cb_state->static_status & CBSTATUS_BLEND_CONSTANTS_SET) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdSetBlendConstants-None-00612",
                        "vkCmdSetBlendConstants(): pipeline was created without VK_DYNAMIC_STATE_BLEND_CONSTANTS flag..");
    }
    return skip;
}
static void PreCallRecordCmdSetBlendConstants(GLOBAL_CB_NODE *cb_state) { cb_state->status |= CBSTATUS_BLEND_CONSTANTS_SET; }
// Intercept for vkCmdSetBlendConstants(): validate/record under the global lock, then forward
// to the driver unless validation requested a skip.
VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = false;
    {
        unique_lock_t lock(global_lock);
        GLOBAL_CB_NODE *cb_node = GetCBNode(dev_data, commandBuffer);
        if (cb_node) {
            skip = PreCallValidateCmdSetBlendConstants(dev_data, cb_node, commandBuffer);
            if (!skip) {
                PreCallRecordCmdSetBlendConstants(cb_node);
            }
        }
    }  // lock released here
    if (!skip) {
        dev_data->dispatch_table.CmdSetBlendConstants(commandBuffer, blendConstants);
    }
}
// Validation for vkCmdSetDepthBounds(): queue capability, command legality, and the
// dynamic-state flag on the bound pipeline.
static bool PreCallValidateCmdSetDepthBounds(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer) {
    bool bad = false;
    bad |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdSetDepthBounds()", VK_QUEUE_GRAPHICS_BIT,
                                 "VUID-vkCmdSetDepthBounds-commandBuffer-cmdpool");
    bad |= ValidateCmd(dev_data, cb_state, CMD_SETDEPTHBOUNDS, "vkCmdSetDepthBounds()");
    const bool depth_bounds_are_static = (cb_state->static_status & CBSTATUS_DEPTH_BOUNDS_SET) != 0;
    if (depth_bounds_are_static) {
        bad |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                       HandleToUint64(commandBuffer), "VUID-vkCmdSetDepthBounds-None-00599",
                       "vkCmdSetDepthBounds(): pipeline was created without VK_DYNAMIC_STATE_DEPTH_BOUNDS flag..");
    }
    return bad;
}
static void PreCallRecordCmdSetDepthBounds(GLOBAL_CB_NODE *cb_state) { cb_state->status |= CBSTATUS_DEPTH_BOUNDS_SET; }
// Intercept for vkCmdSetDepthBounds(): validate/record under the global lock, then forward
// to the driver unless validation requested a skip.
VKAPI_ATTR void VKAPI_CALL CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = false;
    {
        unique_lock_t lock(global_lock);
        GLOBAL_CB_NODE *cb_node = GetCBNode(dev_data, commandBuffer);
        if (cb_node) {
            skip = PreCallValidateCmdSetDepthBounds(dev_data, cb_node, commandBuffer);
            if (!skip) {
                PreCallRecordCmdSetDepthBounds(cb_node);
            }
        }
    }  // lock released here
    if (!skip) {
        dev_data->dispatch_table.CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
    }
}
// Validation for vkCmdSetStencilCompareMask(): queue capability, command legality, and the
// dynamic-state flag on the bound pipeline.
static bool PreCallValidateCmdSetStencilCompareMask(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer) {
    bool bad = false;
    bad |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdSetStencilCompareMask()", VK_QUEUE_GRAPHICS_BIT,
                                 "VUID-vkCmdSetStencilCompareMask-commandBuffer-cmdpool");
    bad |= ValidateCmd(dev_data, cb_state, CMD_SETSTENCILCOMPAREMASK, "vkCmdSetStencilCompareMask()");
    const bool compare_mask_is_static = (cb_state->static_status & CBSTATUS_STENCIL_READ_MASK_SET) != 0;
    if (compare_mask_is_static) {
        bad |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                       HandleToUint64(commandBuffer), "VUID-vkCmdSetStencilCompareMask-None-00602",
                       "vkCmdSetStencilCompareMask(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK flag..");
    }
    return bad;
}
static void PreCallRecordCmdSetStencilCompareMask(GLOBAL_CB_NODE *cb_state) { cb_state->status |= CBSTATUS_STENCIL_READ_MASK_SET; }
// Intercept for vkCmdSetStencilCompareMask(): validate/record under the global lock, then
// forward to the driver unless validation requested a skip.
VKAPI_ATTR void VKAPI_CALL CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
                                                    uint32_t compareMask) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = false;
    {
        unique_lock_t lock(global_lock);
        GLOBAL_CB_NODE *cb_node = GetCBNode(dev_data, commandBuffer);
        if (cb_node) {
            skip = PreCallValidateCmdSetStencilCompareMask(dev_data, cb_node, commandBuffer);
            if (!skip) {
                PreCallRecordCmdSetStencilCompareMask(cb_node);
            }
        }
    }  // lock released here
    if (!skip) {
        dev_data->dispatch_table.CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
    }
}
// Validation for vkCmdSetStencilWriteMask(): queue capability, command legality, and the
// dynamic-state flag on the bound pipeline.
static bool PreCallValidateCmdSetStencilWriteMask(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer) {
    bool bad = false;
    bad |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdSetStencilWriteMask()", VK_QUEUE_GRAPHICS_BIT,
                                 "VUID-vkCmdSetStencilWriteMask-commandBuffer-cmdpool");
    bad |= ValidateCmd(dev_data, cb_state, CMD_SETSTENCILWRITEMASK, "vkCmdSetStencilWriteMask()");
    const bool write_mask_is_static = (cb_state->static_status & CBSTATUS_STENCIL_WRITE_MASK_SET) != 0;
    if (write_mask_is_static) {
        bad |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                       HandleToUint64(commandBuffer), "VUID-vkCmdSetStencilWriteMask-None-00603",
                       "vkCmdSetStencilWriteMask(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_WRITE_MASK flag..");
    }
    return bad;
}
static void PreCallRecordCmdSetStencilWriteMask(GLOBAL_CB_NODE *cb_state) { cb_state->status |= CBSTATUS_STENCIL_WRITE_MASK_SET; }
// Intercept for vkCmdSetStencilWriteMask(): validate/record under the global lock, then
// forward to the driver unless validation requested a skip.
VKAPI_ATTR void VKAPI_CALL CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = false;
    {
        unique_lock_t lock(global_lock);
        GLOBAL_CB_NODE *cb_node = GetCBNode(dev_data, commandBuffer);
        if (cb_node) {
            skip = PreCallValidateCmdSetStencilWriteMask(dev_data, cb_node, commandBuffer);
            if (!skip) {
                PreCallRecordCmdSetStencilWriteMask(cb_node);
            }
        }
    }  // lock released here
    if (!skip) {
        dev_data->dispatch_table.CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
    }
}
// Validation for vkCmdSetStencilReference(): queue capability, command legality, and the
// dynamic-state flag on the bound pipeline.
static bool PreCallValidateCmdSetStencilReference(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer) {
    bool bad = false;
    bad |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdSetStencilReference()", VK_QUEUE_GRAPHICS_BIT,
                                 "VUID-vkCmdSetStencilReference-commandBuffer-cmdpool");
    bad |= ValidateCmd(dev_data, cb_state, CMD_SETSTENCILREFERENCE, "vkCmdSetStencilReference()");
    const bool reference_is_static = (cb_state->static_status & CBSTATUS_STENCIL_REFERENCE_SET) != 0;
    if (reference_is_static) {
        bad |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                       HandleToUint64(commandBuffer), "VUID-vkCmdSetStencilReference-None-00604",
                       "vkCmdSetStencilReference(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_REFERENCE flag..");
    }
    return bad;
}
static void PreCallRecordCmdSetStencilReference(GLOBAL_CB_NODE *cb_state) { cb_state->status |= CBSTATUS_STENCIL_REFERENCE_SET; }
// Intercept for vkCmdSetStencilReference(): validate/record under the global lock, then
// forward to the driver unless validation requested a skip.
VKAPI_ATTR void VKAPI_CALL CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = false;
    {
        unique_lock_t lock(global_lock);
        GLOBAL_CB_NODE *cb_node = GetCBNode(dev_data, commandBuffer);
        if (cb_node) {
            skip = PreCallValidateCmdSetStencilReference(dev_data, cb_node, commandBuffer);
            if (!skip) {
                PreCallRecordCmdSetStencilReference(cb_node);
            }
        }
    }  // lock released here
    if (!skip) {
        dev_data->dispatch_table.CmdSetStencilReference(commandBuffer, faceMask, reference);
    }
}
// Update pipeline_layout bind points applying the "Pipeline Layout Compatibility" rules.
// Binds descriptor_sets[0..set_count) into the per-bind-point "last bound" tracking at
// [first_set, first_set+set_count), invalidating any previously bound sets whose layout
// compatibility IDs no longer match, and consuming dynamic offsets per bound set.
// NOTE: statement order here is load-bearing (push-descriptor cleanup must run before slots
// are overwritten or the vectors are shrunk) — do not reorder casually.
static void UpdateLastBoundDescriptorSets(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
                                          VkPipelineBindPoint pipeline_bind_point, const PIPELINE_LAYOUT_NODE *pipeline_layout,
                                          uint32_t first_set, uint32_t set_count,
                                          const std::vector<cvdescriptorset::DescriptorSet *> descriptor_sets,
                                          uint32_t dynamic_offset_count, const uint32_t *p_dynamic_offsets) {
    // Defensive
    assert(set_count);
    if (0 == set_count) return;
    assert(pipeline_layout);
    if (!pipeline_layout) return;
    uint32_t required_size = first_set + set_count;
    const uint32_t last_binding_index = required_size - 1;
    assert(last_binding_index < pipeline_layout->compat_for_set.size());
    // Some useful shorthand
    auto &last_bound = cb_state->lastBound[pipeline_bind_point];
    auto &bound_sets = last_bound.boundDescriptorSets;
    auto &dynamic_offsets = last_bound.dynamicOffsets;
    auto &bound_compat_ids = last_bound.compat_id_for_set;
    auto &pipe_compat_ids = pipeline_layout->compat_for_set;
    // These three vectors are maintained in lockstep, one entry per set index.
    const uint32_t current_size = static_cast<uint32_t>(bound_sets.size());
    assert(current_size == dynamic_offsets.size());
    assert(current_size == bound_compat_ids.size());
    // We need this three times in this function, but nowhere else.
    // If ds is the currently tracked push descriptor set, drop ownership of it (it is about to
    // be unbound/overwritten) and report that it was found.
    auto push_descriptor_cleanup = [&last_bound](const cvdescriptorset::DescriptorSet *ds) -> bool {
        if (ds && ds->IsPushDescriptor()) {
            assert(ds == last_bound.push_descriptor_set.get());
            last_bound.push_descriptor_set = nullptr;
            return true;
        }
        return false;
    };
    // Clean up the "disturbed" before and after the range to be set
    if (required_size < current_size) {
        if (bound_compat_ids[last_binding_index] != pipe_compat_ids[last_binding_index]) {
            // We're disturbing those after last, we'll shrink below, but first need to check for and cleanup the push_descriptor
            for (auto set_idx = required_size; set_idx < current_size; ++set_idx) {
                // At most one push descriptor set can be tracked, so stop at the first hit.
                if (push_descriptor_cleanup(bound_sets[set_idx])) break;
            }
        } else {
            // We're not disturbing past last, so leave the upper binding data alone.
            required_size = current_size;
        }
    }
    // We resize if we need more set entries or if those past "last" are disturbed
    if (required_size != current_size) {
        // TODO: put these size tied things in a struct (touches many lines)
        bound_sets.resize(required_size);
        dynamic_offsets.resize(required_size);
        bound_compat_ids.resize(required_size);
    }
    // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
    for (uint32_t set_idx = 0; set_idx < first_set; ++set_idx) {
        if (bound_compat_ids[set_idx] != pipe_compat_ids[set_idx]) {
            push_descriptor_cleanup(bound_sets[set_idx]);
            bound_sets[set_idx] = nullptr;
            dynamic_offsets[set_idx].clear();
            bound_compat_ids[set_idx] = pipe_compat_ids[set_idx];
        }
    }
    // Now update the bound sets with the input sets
    const uint32_t *input_dynamic_offsets = p_dynamic_offsets;  // "read" pointer for dynamic offset data
    for (uint32_t input_idx = 0; input_idx < set_count; input_idx++) {
        auto set_idx = input_idx + first_set;  // set_idx is index within layout, input_idx is index within input descriptor sets
        cvdescriptorset::DescriptorSet *descriptor_set = descriptor_sets[input_idx];
        // Record binding (or push)
        push_descriptor_cleanup(bound_sets[set_idx]);
        bound_sets[set_idx] = descriptor_set;
        bound_compat_ids[set_idx] = pipe_compat_ids[set_idx];  // compat ids are canonical *per* set index
        if (descriptor_set) {
            auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount();
            // TODO: Add logic for tracking push_descriptor offsets (here or in caller)
            if (set_dynamic_descriptor_count && input_dynamic_offsets) {
                // Consume this set's share of the flat dynamic-offset array; the caller's
                // validation is assumed to have checked dynamic_offset_count — TODO confirm.
                const uint32_t *end_offset = input_dynamic_offsets + set_dynamic_descriptor_count;
                dynamic_offsets[set_idx] = std::vector<uint32_t>(input_dynamic_offsets, end_offset);
                input_dynamic_offsets = end_offset;
                assert(input_dynamic_offsets <= (p_dynamic_offsets + dynamic_offset_count));
            } else {
                dynamic_offsets[set_idx].clear();
            }
            if (!descriptor_set->IsPushDescriptor()) {
                // Can't cache validation of push_descriptors
                cb_state->validated_descriptor_sets.insert(descriptor_set);
            }
        }
    }
}
// Update the bound state for the bind point, including the effects of incompatible pipeline
// layouts. Looks up each incoming descriptor-set handle and, if at least one is known,
// applies the layout-compatibility update and records the pipeline layout used for the bind.
static void PreCallRecordCmdBindDescriptorSets(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
                                               VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet,
                                               uint32_t setCount, const VkDescriptorSet *pDescriptorSets,
                                               uint32_t dynamicOffsetCount, const uint32_t *pDynamicOffsets) {
    auto pipeline_layout = GetPipelineLayout(device_data, layout);
    // Resolve every handle to its tracked state object (nullptr for unknown handles).
    std::vector<cvdescriptorset::DescriptorSet *> resolved_sets;
    resolved_sets.reserve(setCount);
    bool any_known_set = false;
    for (uint32_t idx = 0; idx < setCount; ++idx) {
        auto *set_node = GetSetNode(device_data, pDescriptorSets[idx]);
        any_known_set |= (set_node != nullptr);
        resolved_sets.push_back(set_node);
    }
    if (!any_known_set) return;  // nothing to track (also implies setCount > 0 when we proceed)
    UpdateLastBoundDescriptorSets(device_data, cb_state, pipelineBindPoint, pipeline_layout, firstSet, setCount, resolved_sets,
                                  dynamicOffsetCount, pDynamicOffsets);
    cb_state->lastBound[pipelineBindPoint].pipeline_layout = layout;
}
// Validation for vkCmdBindDescriptorSets(): queue capability, command legality, per-set layout
// compatibility, staleness warnings, and dynamic-offset count/alignment checks.
// NOTE(review): this function resizes the command buffer's lastBound tracking vectors, i.e. it
// mutates state inside a Validate* function — confirm this is intentional before relying on
// validate-phase purity.
static bool PreCallValidateCmdBindDescriptorSets(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
                                                 VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet,
                                                 uint32_t setCount, const VkDescriptorSet *pDescriptorSets,
                                                 uint32_t dynamicOffsetCount, const uint32_t *pDynamicOffsets) {
    bool skip = false;
    skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdBindDescriptorSets()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdBindDescriptorSets-commandBuffer-cmdpool");
    skip |= ValidateCmd(device_data, cb_state, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
    // Track total count of dynamic descriptor types to make sure we have an offset for each one
    uint32_t total_dynamic_descriptors = 0;
    string error_string = "";
    // Grow the tracking vectors so the compatibility check below can index up to last_set_index.
    uint32_t last_set_index = firstSet + setCount - 1;
    if (last_set_index >= cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
        cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.resize(last_set_index + 1);
        cb_state->lastBound[pipelineBindPoint].dynamicOffsets.resize(last_set_index + 1);
        cb_state->lastBound[pipelineBindPoint].compat_id_for_set.resize(last_set_index + 1);
    }
    auto pipeline_layout = GetPipelineLayout(device_data, layout);
    for (uint32_t set_idx = 0; set_idx < setCount; set_idx++) {
        cvdescriptorset::DescriptorSet *descriptor_set = GetSetNode(device_data, pDescriptorSets[set_idx]);
        if (descriptor_set) {
            // Warn (not error) for sets that were allocated but never written.
            if (!descriptor_set->IsUpdated() && (descriptor_set->GetTotalDescriptorCount() != 0)) {
                skip |= log_msg(
                    device_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                    HandleToUint64(pDescriptorSets[set_idx]), kVUID_Core_DrawState_DescriptorSetNotUpdated,
                    "Descriptor Set 0x%" PRIx64 " bound but it was never updated. You may want to either update it or not bind it.",
                    HandleToUint64(pDescriptorSets[set_idx]));
            }
            // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
            if (!VerifySetLayoutCompatibility(descriptor_set, pipeline_layout, set_idx + firstSet, error_string)) {
                skip |=
                    log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            HandleToUint64(pDescriptorSets[set_idx]), "VUID-vkCmdBindDescriptorSets-pDescriptorSets-00358",
                            "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout at index %u of "
                            "pipelineLayout 0x%" PRIx64 " due to: %s.",
                            set_idx, set_idx + firstSet, HandleToUint64(layout), error_string.c_str());
            }
            auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount();
            if (set_dynamic_descriptor_count) {
                // First make sure we won't overstep bounds of pDynamicOffsets array
                if ((total_dynamic_descriptors + set_dynamic_descriptor_count) > dynamicOffsetCount) {
                    skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(pDescriptorSets[set_idx]),
                                    kVUID_Core_DrawState_InvalidDynamicOffsetCount,
                                    "descriptorSet #%u (0x%" PRIx64
                                    ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets array. "
                                    "There must be one dynamic offset for each dynamic descriptor being bound.",
                                    set_idx, HandleToUint64(pDescriptorSets[set_idx]), descriptor_set->GetDynamicDescriptorCount(),
                                    (dynamicOffsetCount - total_dynamic_descriptors));
                } else {  // Validate dynamic offsets and Dynamic Offset Minimums
                    // Walk every descriptor in the set; only the dynamic UBO/SSBO ones consume an
                    // entry from pDynamicOffsets, each of which must satisfy the device's
                    // min*BufferOffsetAlignment limit.
                    // NOTE(review): "%d" is used below for unsigned values — harmless for typical
                    // offsets but %u would be the exact format.
                    uint32_t cur_dyn_offset = total_dynamic_descriptors;
                    for (uint32_t d = 0; d < descriptor_set->GetTotalDescriptorCount(); d++) {
                        if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
                            if (SafeModulo(pDynamicOffsets[cur_dyn_offset],
                                           device_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) !=
                                0) {
                                skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
                                                "VUID-vkCmdBindDescriptorSets-pDynamicOffsets-01971",
                                                "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
                                                "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64 ".",
                                                cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
                                                device_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
                            }
                            cur_dyn_offset++;
                        } else if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
                            if (SafeModulo(pDynamicOffsets[cur_dyn_offset],
                                           device_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) !=
                                0) {
                                skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
                                                "VUID-vkCmdBindDescriptorSets-pDynamicOffsets-01972",
                                                "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
                                                "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64 ".",
                                                cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
                                                device_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
                            }
                            cur_dyn_offset++;
                        }
                    }
                    // Keep running total of dynamic descriptor count to verify at the end
                    total_dynamic_descriptors += set_dynamic_descriptor_count;
                }
            }
        } else {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            HandleToUint64(pDescriptorSets[set_idx]), kVUID_Core_DrawState_InvalidSet,
                            "Attempt to bind descriptor set 0x%" PRIx64 " that doesn't exist!",
                            HandleToUint64(pDescriptorSets[set_idx]));
        }
    }
    // dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
    if (total_dynamic_descriptors != dynamicOffsetCount) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdBindDescriptorSets-dynamicOffsetCount-00359",
                        "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount is %u. It should "
                        "exactly match the number of dynamic descriptors.",
                        setCount, total_dynamic_descriptors, dynamicOffsetCount);
    }
    return skip;
}
// Intercept for vkCmdBindDescriptorSets(): validate, then record state and forward to the
// driver only if validation passed.
VKAPI_ATTR void VKAPI_CALL CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
                                                 VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount,
                                                 const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
                                                 const uint32_t *pDynamicOffsets) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
    assert(cb_state);
    const bool skip = PreCallValidateCmdBindDescriptorSets(device_data, cb_state, pipelineBindPoint, layout, firstSet, setCount,
                                                           pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
    if (skip) {
        return;  // lock released by destructor
    }
    PreCallRecordCmdBindDescriptorSets(device_data, cb_state, pipelineBindPoint, layout, firstSet, setCount, pDescriptorSets,
                                       dynamicOffsetCount, pDynamicOffsets);
    lock.unlock();
    device_data->dispatch_table.CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
                                                      pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
}
// Validates that the supplied bind point is supported for the command buffer (vis. the command pool)
// Takes array of error codes as some of the VUID's (e.g. vkCmdBindPipeline) are written per bindpoint
// TODO add vkCmdBindPipeline bind_point validation using this call.
// NOTE(review): both flag_mask.at() and bind_errors.at() throw std::out_of_range for a bind
// point absent from the maps — callers are expected to pass only the enumerated bind points.
bool ValidatePipelineBindPoint(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
                               const char *func_name, const std::map<VkPipelineBindPoint, std::string> &bind_errors) {
    bool skip = false;
    auto pool = GetCommandPoolNode(device_data, cb_state->createInfo.commandPool);
    if (pool) {  // The loss of a pool in a recording cmd is reported in DestroyCommandPool
        // Queue capability required for each bind point.
        static const std::map<VkPipelineBindPoint, VkQueueFlags> flag_mask = {
            std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT)),
            std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, static_cast<VkQueueFlags>(VK_QUEUE_COMPUTE_BIT)),
            std::make_pair(VK_PIPELINE_BIND_POINT_RAYTRACING_NVX,
                           static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT)),
        };
        // Check the pool's queue family against the capability the bind point requires.
        const auto &qfp = GetPhysDevProperties(device_data)->queue_family_properties[pool->queueFamilyIndex];
        if (0 == (qfp.queueFlags & flag_mask.at(bind_point))) {
            const std::string error = bind_errors.at(bind_point);
            auto cb_u64 = HandleToUint64(cb_state->commandBuffer);
            auto cp_u64 = HandleToUint64(cb_state->createInfo.commandPool);
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            cb_u64, error,
                            "%s: CommandBuffer 0x%" PRIxLEAST64 " was allocated from VkCommandPool 0x%" PRIxLEAST64
                            " that does not support bindpoint %s.",
                            func_name, cb_u64, cp_u64, string_VkPipelineBindPoint(bind_point));
        }
    }
    return skip;
}
// Validation for vkCmdPushDescriptorSetKHR(): command legality, queue capability, bind-point
// support, and that 'set' indexes an in-range set layout created with the push-descriptor flag.
// BUG FIX: the two set-layout log_msg results were assigned with '=' instead of '|=', which
// silently discarded any failures already accumulated from ValidateCmd/ValidateCmdQueueFlags/
// ValidatePipelineBindPoint; both now accumulate with '|='.
static bool PreCallValidateCmdPushDescriptorSetKHR(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
                                                   const VkPipelineBindPoint bind_point, const VkPipelineLayout layout,
                                                   const uint32_t set, const uint32_t descriptor_write_count,
                                                   const VkWriteDescriptorSet *descriptor_writes, const char *func_name) {
    bool skip = false;
    skip |= ValidateCmd(device_data, cb_state, CMD_PUSHDESCRIPTORSETKHR, func_name);
    skip |= ValidateCmdQueueFlags(device_data, cb_state, func_name, (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT),
                                  "VUID-vkCmdPushDescriptorSetKHR-commandBuffer-cmdpool");
    // All supported bind points share the same VUID for this entry point.
    static const std::map<VkPipelineBindPoint, std::string> bind_errors = {
        std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363"),
        std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363"),
        std::make_pair(VK_PIPELINE_BIND_POINT_RAYTRACING_NVX, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363")};
    skip |= ValidatePipelineBindPoint(device_data, cb_state, bind_point, func_name, bind_errors);
    auto layout_data = GetPipelineLayout(device_data, layout);
    // Validate the set index points to a push descriptor set and is in range
    if (layout_data) {
        const auto &set_layouts = layout_data->set_layouts;
        const auto layout_u64 = HandleToUint64(layout);
        if (set < set_layouts.size()) {
            const auto *dsl = set_layouts[set].get();
            if (dsl && (0 == (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR))) {
                skip |=
                    log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, layout_u64, "VUID-vkCmdPushDescriptorSetKHR-set-00365",
                            "%s: Set index %" PRIu32
                            " does not match push descriptor set layout index for VkPipelineLayout 0x%" PRIxLEAST64 ".",
                            func_name, set, layout_u64);
            }
        } else {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT,
                            layout_u64, "VUID-vkCmdPushDescriptorSetKHR-set-00364",
                            "%s: Set index %" PRIu32 " is outside of range for VkPipelineLayout 0x%" PRIxLEAST64 " (set < %" PRIu32
                            ").",
                            func_name, set, layout_u64, static_cast<uint32_t>(set_layouts.size()));
        }
    }
    return skip;
}
// Record side for vkCmdPushDescriptorSetKHR(): create a transient DescriptorSet to stand in
// for the pushed descriptors, bind it at 'set', and take ownership of it on the command buffer.
// NOTE(review): set_layouts[set] is indexed without a bounds check here — this relies on the
// validate phase having rejected out-of-range 'set' values; confirm record is never called
// when validation failed.
static void PreCallRecordCmdPushDescriptorSetKHR(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
                                                 VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set,
                                                 uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites) {
    const auto &pipeline_layout = GetPipelineLayout(device_data, layout);
    if (!pipeline_layout) return;
    // Transient set with null handle/pool: it exists only for state tracking.
    std::unique_ptr<cvdescriptorset::DescriptorSet> new_desc{
        new cvdescriptorset::DescriptorSet(0, 0, pipeline_layout->set_layouts[set], 0, device_data)};
    std::vector<cvdescriptorset::DescriptorSet *> descriptor_sets = {new_desc.get()};
    UpdateLastBoundDescriptorSets(device_data, cb_state, pipelineBindPoint, pipeline_layout, set, 1, descriptor_sets, 0, nullptr);
    // Ownership of the transient set moves to the command buffer's last-bound tracking.
    cb_state->lastBound[pipelineBindPoint].push_descriptor_set = std::move(new_desc);
    cb_state->lastBound[pipelineBindPoint].pipeline_layout = layout;
}
// Intercept for vkCmdPushDescriptorSetKHR(): validate, then record and forward to the driver
// only if validation passed.
VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
                                                   VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount,
                                                   const VkWriteDescriptorSet *pDescriptorWrites) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    auto cb_state = GetCBNode(device_data, commandBuffer);
    const bool skip = PreCallValidateCmdPushDescriptorSetKHR(device_data, cb_state, pipelineBindPoint, layout, set,
                                                             descriptorWriteCount, pDescriptorWrites,
                                                             "vkCmdPushDescriptorSetKHR()");
    if (skip) {
        return;  // lock released by destructor
    }
    PreCallRecordCmdPushDescriptorSetKHR(device_data, cb_state, pipelineBindPoint, layout, set, descriptorWriteCount,
                                         pDescriptorWrites);
    lock.unlock();
    device_data->dispatch_table.CmdPushDescriptorSetKHR(commandBuffer, pipelineBindPoint, layout, set, descriptorWriteCount,
                                                        pDescriptorWrites);
}
// Byte alignment required for an index buffer offset, keyed by index type.
static VkDeviceSize GetIndexAlignment(VkIndexType indexType) {
    if (indexType == VK_INDEX_TYPE_UINT16) {
        return 2;
    }
    if (indexType == VK_INDEX_TYPE_UINT32) {
        return 4;
    }
    // Not a real index type. Express no alignment requirement here; we expect upper layer
    // to have already picked up on the enum being nonsense.
    return 1;
}
// Validation for vkCmdBindIndexBuffer(): buffer usage flag, queue capability, command
// legality, memory binding, and offset alignment for the given index type.
static bool PreCallValidateCmdBindIndexBuffer(layer_data *dev_data, BUFFER_STATE *buffer_state, GLOBAL_CB_NODE *cb_node,
                                              VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                              VkIndexType indexType) {
    bool bad = false;
    bad |= ValidateBufferUsageFlags(dev_data, buffer_state, VK_BUFFER_USAGE_INDEX_BUFFER_BIT, true,
                                    "VUID-vkCmdBindIndexBuffer-buffer-00433", "vkCmdBindIndexBuffer()",
                                    "VK_BUFFER_USAGE_INDEX_BUFFER_BIT");
    bad |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBindIndexBuffer()", VK_QUEUE_GRAPHICS_BIT,
                                 "VUID-vkCmdBindIndexBuffer-commandBuffer-cmdpool");
    bad |= ValidateCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
    bad |=
        ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindIndexBuffer()", "VUID-vkCmdBindIndexBuffer-buffer-00434");
    // offset must be a multiple of the index element size (2 or 4 bytes).
    const VkDeviceSize required_alignment = GetIndexAlignment(indexType);
    if ((offset % required_alignment) != 0) {
        bad |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                       HandleToUint64(commandBuffer), "VUID-vkCmdBindIndexBuffer-offset-00432",
                       "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.", offset,
                       string_VkIndexType(indexType));
    }
    return bad;
}
// Record side for vkCmdBindIndexBuffer(): mark the index buffer as bound and snapshot the
// binding (buffer handle, full buffer size, offset, index type) for later draw validation.
static void PreCallRecordCmdBindIndexBuffer(BUFFER_STATE *buffer_state, GLOBAL_CB_NODE *cb_node, VkBuffer buffer,
                                            VkDeviceSize offset, VkIndexType indexType) {
    cb_node->status |= CBSTATUS_INDEX_BUFFER_BOUND;
    cb_node->index_buffer_binding.buffer = buffer;
    cb_node->index_buffer_binding.size = buffer_state->createInfo.size;
    cb_node->index_buffer_binding.offset = offset;
    cb_node->index_buffer_binding.index_type = indexType;
}
// Intercept for vkCmdBindIndexBuffer(): validate, then record and forward to the driver only
// if validation passed.
// BUG FIX: the return value of PreCallValidateCmdBindIndexBuffer() was previously discarded
// ('skip' was initialized to false and never updated), so 'if (skip) return;' could never
// fire and invalid binds were still recorded and dispatched. The validation result is now
// captured and honored.
VKAPI_ATTR void VKAPI_CALL CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                              VkIndexType indexType) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    auto buffer_state = GetBufferState(dev_data, buffer);
    auto cb_node = GetCBNode(dev_data, commandBuffer);
    assert(cb_node);
    assert(buffer_state);
    bool skip = PreCallValidateCmdBindIndexBuffer(dev_data, buffer_state, cb_node, commandBuffer, buffer, offset, indexType);
    if (skip) return;  // lock released by destructor
    PreCallRecordCmdBindIndexBuffer(buffer_state, cb_node, buffer, offset, indexType);
    lock.unlock();
    dev_data->dispatch_table.CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
}
static inline void UpdateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->draw_data.push_back(pCB->current_draw_data); }
// Validation for vkCmdBindVertexBuffers(): queue capability, command legality, and per-buffer
// usage-flag, memory-binding, and offset-range checks.
static bool PreCallValidateCmdBindVertexBuffers(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, uint32_t bindingCount,
                                                const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) {
    bool bad = false;
    bad |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdBindVertexBuffers()", VK_QUEUE_GRAPHICS_BIT,
                                 "VUID-vkCmdBindVertexBuffers-commandBuffer-cmdpool");
    bad |= ValidateCmd(dev_data, cb_state, CMD_BINDVERTEXBUFFERS, "vkCmdBindVertexBuffers()");
    for (uint32_t binding = 0; binding < bindingCount; ++binding) {
        auto buffer_state = GetBufferState(dev_data, pBuffers[binding]);
        assert(buffer_state);
        bad |= ValidateBufferUsageFlags(dev_data, buffer_state, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, true,
                                        "VUID-vkCmdBindVertexBuffers-pBuffers-00627", "vkCmdBindVertexBuffers()",
                                        "VK_BUFFER_USAGE_VERTEX_BUFFER_BIT");
        bad |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindVertexBuffers()",
                                             "VUID-vkCmdBindVertexBuffers-pBuffers-00628");
        // Offsets must land inside the buffer.
        if (pOffsets[binding] >= buffer_state->createInfo.size) {
            bad |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                           HandleToUint64(buffer_state->buffer), "VUID-vkCmdBindVertexBuffers-pOffsets-00626",
                           "vkCmdBindVertexBuffers() offset (0x%" PRIxLEAST64 ") is beyond the end of the buffer.",
                           pOffsets[binding]);
        }
    }
    return bad;
}
// Record side for vkCmdBindVertexBuffers(): grow the per-draw binding table if needed and
// store each (buffer, offset) pair at its binding slot.
static void PreCallRecordCmdBindVertexBuffers(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount,
                                              const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) {
    auto &bindings = pCB->current_draw_data.vertex_buffer_bindings;
    const uint32_t needed_size = firstBinding + bindingCount;
    if (bindings.size() < needed_size) {
        bindings.resize(needed_size);
    }
    for (uint32_t idx = 0; idx < bindingCount; ++idx) {
        auto &slot = bindings[firstBinding + idx];
        slot.buffer = pBuffers[idx];
        slot.offset = pOffsets[idx];
    }
}
// Intercept for vkCmdBindVertexBuffers(): validate, then record and forward to the driver
// only if validation passed.
VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount,
                                                const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    auto cb_node = GetCBNode(dev_data, commandBuffer);
    assert(cb_node);
    if (PreCallValidateCmdBindVertexBuffers(dev_data, cb_node, bindingCount, pBuffers, pOffsets)) {
        return;  // lock released by destructor
    }
    PreCallRecordCmdBindVertexBuffers(cb_node, firstBinding, bindingCount, pBuffers, pOffsets);
    lock.unlock();
    dev_data->dispatch_table.CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
}
// Generic function to handle validation for all CmdDraw* type functions.
// Looks up the command buffer node (returned via *cb_state for the caller's later record
// phase) and runs the shared queue-flag, command-legality, draw-state, and render-pass-scope
// checks using the caller-supplied VUID strings.
static bool ValidateCmdDrawType(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
                                CMD_TYPE cmd_type, GLOBAL_CB_NODE **cb_state, const char *caller, VkQueueFlags queue_flags,
                                const std::string &queue_flag_code, const std::string &renderpass_msg_code,
                                const std::string &pipebound_msg_code, const std::string &dynamic_state_msg_code) {
    bool skip = false;
    *cb_state = GetCBNode(dev_data, cmd_buffer);
    if (*cb_state) {
        skip |= ValidateCmdQueueFlags(dev_data, *cb_state, caller, queue_flags, queue_flag_code);
        skip |= ValidateCmd(dev_data, *cb_state, cmd_type, caller);
        skip |= ValidateCmdBufDrawState(dev_data, *cb_state, cmd_type, indexed, bind_point, caller, pipebound_msg_code,
                                        dynamic_state_msg_code);
        // Graphics draws must be inside a render pass; compute/other dispatches must be outside.
        skip |= (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
                    ? OutsideRenderPass(dev_data, *cb_state, caller, renderpass_msg_code)
                    : InsideRenderPass(dev_data, *cb_state, caller, renderpass_msg_code);
    }
    return skip;
}
// Generic function to handle state update for all CmdDraw* and CmdDispatch* type functions.
// Thin wrapper kept so draw and dispatch share a single state-update entry point.
static void UpdateStateCmdDrawDispatchType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
    UpdateDrawState(dev_data, cb_state, bind_point);
}
// Generic function to handle state update for all CmdDraw* type functions
static void UpdateStateCmdDrawType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point);
UpdateResourceTrackingOnDraw(cb_state);
cb_state->hasDrawCmd = true;
// Add descriptor image/CIS layouts to CB layout map
auto &desc_sets = cb_state->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].boundDescriptorSets;
for (auto &desc : desc_sets) {
if (desc) {
desc->UpdateDSImageLayoutState(cb_state);
}
}
}
static bool PreCallValidateCmdDraw(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
GLOBAL_CB_NODE **cb_state, const char *caller) {
return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAW, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdDraw-commandBuffer-cmdpool", "VUID-vkCmdDraw-renderpass", "VUID-vkCmdDraw-None-00442",
"VUID-vkCmdDraw-None-00443");
}
static void PostCallRecordCmdDraw(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
}
// Intercept for vkCmdDraw: validate under lock, dispatch unlocked, then record state under lock again.
VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
                                   uint32_t firstVertex, uint32_t firstInstance) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    unique_lock_t lock(global_lock);
    const bool skip =
        PreCallValidateCmdDraw(device_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state, "vkCmdDraw()");
    lock.unlock();
    if (skip) return;
    device_data->dispatch_table.CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
    lock.lock();
    PostCallRecordCmdDraw(device_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
    lock.unlock();
}
// Validation for vkCmdDrawIndexed(): shared draw-type checks plus an index-buffer range check
// ensuring (firstIndex + indexCount) indices fit within the bound index buffer.
static bool PreCallValidateCmdDrawIndexed(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
                                          VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller,
                                          uint32_t indexCount, uint32_t firstIndex) {
    bool skip =
        ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXED, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
                            "VUID-vkCmdDrawIndexed-commandBuffer-cmdpool", "VUID-vkCmdDrawIndexed-renderpass",
                            "VUID-vkCmdDrawIndexed-None-00461", "VUID-vkCmdDrawIndexed-None-00462");
    // Only do the range check if validation passed so far and an index buffer is actually bound.
    if (!skip && ((*cb_state)->status & CBSTATUS_INDEX_BUFFER_BOUND)) {
        unsigned int index_size = 0;
        const auto &index_buffer_binding = (*cb_state)->index_buffer_binding;
        // NOTE(review): only UINT16/UINT32 are handled; for any other index type index_size stays 0,
        // which effectively disables the size check below — TODO confirm whether that is intended.
        if (index_buffer_binding.index_type == VK_INDEX_TYPE_UINT16) {
            index_size = 2;
        } else if (index_buffer_binding.index_type == VK_INDEX_TYPE_UINT32) {
            index_size = 4;
        }
        // Widen to VkDeviceSize before multiplying to avoid 32-bit overflow of firstIndex + indexCount.
        VkDeviceSize end_offset = (index_size * ((VkDeviceSize)firstIndex + indexCount)) + index_buffer_binding.offset;
        if (end_offset > index_buffer_binding.size) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                            HandleToUint64(index_buffer_binding.buffer), "VUID-vkCmdDrawIndexed-indexSize-00463",
                            "vkCmdDrawIndexed() index size (%d) * (firstIndex (%d) + indexCount (%d)) "
                            "+ binding offset (%" PRIuLEAST64 ") = an ending offset of %" PRIuLEAST64
                            " bytes, "
                            "which is greater than the index buffer size (%" PRIuLEAST64 ").",
                            index_size, firstIndex, indexCount, index_buffer_binding.offset, end_offset, index_buffer_binding.size);
        }
    }
    return skip;
}
static void PostCallRecordCmdDrawIndexed(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
}
// Intercept for vkCmdDrawIndexed: validate under lock, dispatch unlocked, record state under lock.
VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
                                          uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    unique_lock_t lock(global_lock);
    const bool skip = PreCallValidateCmdDrawIndexed(device_data, commandBuffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
                                                    "vkCmdDrawIndexed()", indexCount, firstIndex);
    lock.unlock();
    if (skip) return;
    device_data->dispatch_table.CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
    lock.lock();
    PostCallRecordCmdDrawIndexed(device_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
    lock.unlock();
}
static bool PreCallValidateCmdDrawIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, BUFFER_STATE **buffer_state,
const char *caller) {
bool skip =
ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDIRECT, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdDrawIndirect-commandBuffer-cmdpool", "VUID-vkCmdDrawIndirect-renderpass",
"VUID-vkCmdDrawIndirect-None-00485", "VUID-vkCmdDrawIndirect-None-00486");
*buffer_state = GetBufferState(dev_data, buffer);
skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, "VUID-vkCmdDrawIndirect-buffer-00474");
// TODO: If the drawIndirectFirstInstance feature is not enabled, all the firstInstance members of the
// VkDrawIndirectCommand structures accessed by this command must be 0, which will require access to the contents of 'buffer'.
return skip;
}
static void PostCallRecordCmdDrawIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
BUFFER_STATE *buffer_state) {
UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
}
// Intercept for vkCmdDrawIndirect: validate under lock, dispatch unlocked, record state under lock.
VKAPI_ATTR void VKAPI_CALL CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
                                           uint32_t stride) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    BUFFER_STATE *buffer_state = nullptr;
    unique_lock_t lock(global_lock);
    const bool skip = PreCallValidateCmdDrawIndirect(device_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS,
                                                     &cb_state, &buffer_state, "vkCmdDrawIndirect()");
    lock.unlock();
    if (skip) return;
    device_data->dispatch_table.CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
    lock.lock();
    PostCallRecordCmdDrawIndirect(device_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
    lock.unlock();
}
static bool PreCallValidateCmdDrawIndexedIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
BUFFER_STATE **buffer_state, const char *caller) {
bool skip = ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXEDINDIRECT, cb_state, caller,
VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdDrawIndexedIndirect-commandBuffer-cmdpool",
"VUID-vkCmdDrawIndexedIndirect-renderpass", "VUID-vkCmdDrawIndexedIndirect-None-00537",
"VUID-vkCmdDrawIndexedIndirect-None-00538");
*buffer_state = GetBufferState(dev_data, buffer);
skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, "VUID-vkCmdDrawIndexedIndirect-buffer-00526");
// TODO: If the drawIndirectFirstInstance feature is not enabled, all the firstInstance members of the
// VkDrawIndexedIndirectCommand structures accessed by this command must be 0, which will require access to the contents of
// 'buffer'.
return skip;
}
static void PostCallRecordCmdDrawIndexedIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
BUFFER_STATE *buffer_state) {
UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
}
// Intercept for vkCmdDrawIndexedIndirect: validate under lock, dispatch unlocked, record state under lock.
VKAPI_ATTR void VKAPI_CALL CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                  uint32_t count, uint32_t stride) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    BUFFER_STATE *buffer_state = nullptr;
    unique_lock_t lock(global_lock);
    const bool skip = PreCallValidateCmdDrawIndexedIndirect(device_data, commandBuffer, buffer, true,
                                                            VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state, &buffer_state,
                                                            "vkCmdDrawIndexedIndirect()");
    lock.unlock();
    if (skip) return;
    device_data->dispatch_table.CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
    lock.lock();
    PostCallRecordCmdDrawIndexedIndirect(device_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
    lock.unlock();
}
static bool PreCallValidateCmdDispatch(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller) {
return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCH, cb_state, caller, VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdDispatch-commandBuffer-cmdpool", "VUID-vkCmdDispatch-renderpass",
"VUID-vkCmdDispatch-None-00391", kVUIDUndefined);
}
static void PostCallRecordCmdDispatch(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point);
}
// Intercept for vkCmdDispatch: validate under lock, dispatch unlocked, record state under lock.
VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    unique_lock_t lock(global_lock);
    const bool skip =
        PreCallValidateCmdDispatch(device_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_COMPUTE, &cb_state, "vkCmdDispatch()");
    lock.unlock();
    if (skip) return;
    device_data->dispatch_table.CmdDispatch(commandBuffer, x, y, z);
    lock.lock();
    PostCallRecordCmdDispatch(device_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE);
    lock.unlock();
}
static bool PreCallValidateCmdDispatchIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
BUFFER_STATE **buffer_state, const char *caller) {
bool skip =
ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCHINDIRECT, cb_state, caller, VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdDispatchIndirect-commandBuffer-cmdpool", "VUID-vkCmdDispatchIndirect-renderpass",
"VUID-vkCmdDispatchIndirect-None-00404", kVUIDUndefined);
*buffer_state = GetBufferState(dev_data, buffer);
skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, "VUID-vkCmdDispatchIndirect-buffer-00401");
return skip;
}
static void PostCallRecordCmdDispatchIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
BUFFER_STATE *buffer_state) {
UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point);
AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
}
// Intercept for vkCmdDispatchIndirect: validate under lock, dispatch unlocked, record state under lock.
VKAPI_ATTR void VKAPI_CALL CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    BUFFER_STATE *buffer_state = nullptr;
    unique_lock_t lock(global_lock);
    const bool skip = PreCallValidateCmdDispatchIndirect(device_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_COMPUTE,
                                                         &cb_state, &buffer_state, "vkCmdDispatchIndirect()");
    lock.unlock();
    if (skip) return;
    device_data->dispatch_table.CmdDispatchIndirect(commandBuffer, buffer, offset);
    lock.lock();
    PostCallRecordCmdDispatchIndirect(device_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE, buffer_state);
    lock.unlock();
}
// Intercept for vkCmdCopyBuffer: look up state, validate, record, then dispatch unlocked.
VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
                                         uint32_t regionCount, const VkBufferCopy *pRegions) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    auto cb_state = GetCBNode(dev_data, commandBuffer);
    auto src_state = GetBufferState(dev_data, srcBuffer);
    auto dst_state = GetBufferState(dev_data, dstBuffer);
    if (!cb_state || !src_state || !dst_state) {
        // Missing state tracking for one of the handles; nothing safe to validate or record.
        lock.unlock();
        assert(0);
        return;
    }
    const bool skip = PreCallValidateCmdCopyBuffer(dev_data, cb_state, src_state, dst_state);
    if (skip) return;
    PreCallRecordCmdCopyBuffer(dev_data, cb_state, src_state, dst_state);
    lock.unlock();
    dev_data->dispatch_table.CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
}
// Intercept for vkCmdCopyImage: look up state, validate, record, then dispatch unlocked.
VKAPI_ATTR void VKAPI_CALL CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                        VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                        const VkImageCopy *pRegions) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    auto cb_state = GetCBNode(dev_data, commandBuffer);
    auto src_state = GetImageState(dev_data, srcImage);
    auto dst_state = GetImageState(dev_data, dstImage);
    if (!cb_state || !src_state || !dst_state) {
        // Missing state tracking for one of the handles; nothing safe to validate or record.
        lock.unlock();
        assert(0);
        return;
    }
    const bool skip = PreCallValidateCmdCopyImage(dev_data, cb_state, src_state, dst_state, regionCount, pRegions, srcImageLayout,
                                                  dstImageLayout);
    if (skip) return;
    PreCallRecordCmdCopyImage(dev_data, cb_state, src_state, dst_state, regionCount, pRegions, srcImageLayout, dstImageLayout);
    lock.unlock();
    dev_data->dispatch_table.CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
                                          pRegions);
}
// Validate that an image's sampleCount matches the requirement for a specific API call
bool ValidateImageSampleCount(layer_data *dev_data, IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count,
                              const char *location, const std::string &msgCode) {
    if (image_state->createInfo.samples == sample_count) return false;
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                   HandleToUint64(image_state->image), msgCode,
                   "%s for image 0x%" PRIx64 " was created with a sample count of %s but must be %s.", location,
                   HandleToUint64(image_state->image), string_VkSampleCountFlagBits(image_state->createInfo.samples),
                   string_VkSampleCountFlagBits(sample_count));
}
// Intercept for vkCmdBlitImage: validate and record under lock, then dispatch unlocked.
// NOTE: null state pointers are handed to the validate call, which is expected to report them.
VKAPI_ATTR void VKAPI_CALL CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                        VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                        const VkImageBlit *pRegions, VkFilter filter) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    auto cb_state = GetCBNode(device_data, commandBuffer);
    auto src_state = GetImageState(device_data, srcImage);
    auto dst_state = GetImageState(device_data, dstImage);
    const bool skip = PreCallValidateCmdBlitImage(device_data, cb_state, src_state, dst_state, regionCount, pRegions,
                                                  srcImageLayout, dstImageLayout, filter);
    if (skip) return;
    PreCallRecordCmdBlitImage(device_data, cb_state, src_state, dst_state, regionCount, pRegions, srcImageLayout, dstImageLayout);
    lock.unlock();
    device_data->dispatch_table.CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
                                             pRegions, filter);
}
// Intercept for vkCmdCopyBufferToImage: look up state, validate, record, then dispatch unlocked.
VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
                                                VkImageLayout dstImageLayout, uint32_t regionCount,
                                                const VkBufferImageCopy *pRegions) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    auto cb_node = GetCBNode(device_data, commandBuffer);
    auto src_buffer_state = GetBufferState(device_data, srcBuffer);
    auto dst_image_state = GetImageState(device_data, dstImage);
    if (!cb_node || !src_buffer_state || !dst_image_state) {
        lock.unlock();
        assert(0);
        // TODO: report VU01244 here, or put in object tracker?
        // Bug fix: must bail out here. Previously (with NDEBUG, where assert is a no-op) control fell
        // through to the record path below, dereferencing null state pointers and calling unlock() on a
        // lock that was no longer owned (std::unique_lock::unlock throws in that case).
        return;
    }
    bool skip = PreCallValidateCmdCopyBufferToImage(device_data, dstImageLayout, cb_node, src_buffer_state, dst_image_state,
                                                    regionCount, pRegions, "vkCmdCopyBufferToImage()");
    if (!skip) {
        PreCallRecordCmdCopyBufferToImage(device_data, cb_node, src_buffer_state, dst_image_state, regionCount, pRegions,
                                          dstImageLayout);
        lock.unlock();
        device_data->dispatch_table.CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
    }
}
// Intercept for vkCmdCopyImageToBuffer: look up state, validate, record, then dispatch unlocked.
VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                                VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    auto cb_node = GetCBNode(device_data, commandBuffer);
    auto src_image_state = GetImageState(device_data, srcImage);
    auto dst_buffer_state = GetBufferState(device_data, dstBuffer);
    if (!cb_node || !src_image_state || !dst_buffer_state) {
        lock.unlock();
        assert(0);
        // TODO: report VU01262 here, or put in object tracker?
        // Bug fix: must bail out here. Previously (with NDEBUG, where assert is a no-op) control fell
        // through to the record path below, dereferencing null state pointers and calling unlock() on a
        // lock that was no longer owned (std::unique_lock::unlock throws in that case).
        return;
    }
    bool skip = PreCallValidateCmdCopyImageToBuffer(device_data, srcImageLayout, cb_node, src_image_state, dst_buffer_state,
                                                    regionCount, pRegions, "vkCmdCopyImageToBuffer()");
    if (!skip) {
        PreCallRecordCmdCopyImageToBuffer(device_data, cb_node, src_image_state, dst_buffer_state, regionCount, pRegions,
                                          srcImageLayout);
        lock.unlock();
        device_data->dispatch_table.CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
    }
}
// Validation for vkCmdUpdateBuffer(): destination buffer binding/usage plus command-buffer checks.
static bool PreCallCmdUpdateBuffer(layer_data *device_data, const GLOBAL_CB_NODE *cb_state, const BUFFER_STATE *dst_buffer_state) {
    // Destination buffer must be backed by memory ...
    bool skip = ValidateMemoryIsBoundToBuffer(device_data, dst_buffer_state, "vkCmdUpdateBuffer()",
                                              "VUID-vkCmdUpdateBuffer-dstBuffer-00035");
    // ... and created with TRANSFER_DST usage.
    skip |= ValidateBufferUsageFlags(device_data, dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                     "VUID-vkCmdUpdateBuffer-dstBuffer-00034", "vkCmdUpdateBuffer()",
                                     "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
    skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdUpdateBuffer()",
                                  VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdUpdateBuffer-commandBuffer-cmdpool");
    skip |= ValidateCmd(device_data, cb_state, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
    skip |= InsideRenderPass(device_data, cb_state, "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-renderpass");
    return skip;
}
static void PostCallRecordCmdUpdateBuffer(layer_data *device_data, GLOBAL_CB_NODE *cb_state, BUFFER_STATE *dst_buffer_state) {
// Update bindings between buffer and cmd buffer
AddCommandBufferBindingBuffer(device_data, cb_state, dst_buffer_state);
}
// Intercept for vkCmdUpdateBuffer: validate under lock, dispatch unlocked, record state under lock.
VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                                           VkDeviceSize dataSize, const uint32_t *pData) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    auto cb_node = GetCBNode(device_data, commandBuffer);
    assert(cb_node);
    auto dst_buffer_state = GetBufferState(device_data, dstBuffer);
    assert(dst_buffer_state);
    const bool skip = PreCallCmdUpdateBuffer(device_data, cb_node, dst_buffer_state);
    lock.unlock();
    if (skip) return;
    device_data->dispatch_table.CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
    lock.lock();
    PostCallRecordCmdUpdateBuffer(device_data, cb_node, dst_buffer_state);
    lock.unlock();
}
// Intercept for vkCmdFillBuffer: look up state, validate, record, then dispatch unlocked.
VKAPI_ATTR void VKAPI_CALL CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                                         VkDeviceSize size, uint32_t data) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    auto cb_state = GetCBNode(dev_data, commandBuffer);
    auto dst_buffer_state = GetBufferState(dev_data, dstBuffer);
    if (!cb_state || !dst_buffer_state) {
        // Missing state tracking for one of the handles; nothing safe to validate or record.
        lock.unlock();
        assert(0);
        return;
    }
    const bool skip = PreCallValidateCmdFillBuffer(dev_data, cb_state, dst_buffer_state);
    if (skip) return;
    PreCallRecordCmdFillBuffer(dev_data, cb_state, dst_buffer_state);
    lock.unlock();
    dev_data->dispatch_table.CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
}
// Intercept for vkCmdClearAttachments: validation only, then pass-through.
VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
                                               const VkClearAttachment *pAttachments, uint32_t rectCount,
                                               const VkClearRect *pRects) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = false;
    {
        // Only validation needs the lock; release it before calling down the chain.
        lock_guard_t lock(global_lock);
        skip = PreCallValidateCmdClearAttachments(device_data, commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
    }
    if (skip) return;
    device_data->dispatch_table.CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
}
// Intercept for vkCmdClearColorImage: validate, record, then dispatch unlocked.
VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
                                              const VkClearColorValue *pColor, uint32_t rangeCount,
                                              const VkImageSubresourceRange *pRanges) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    const bool skip = PreCallValidateCmdClearColorImage(device_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
    if (skip) return;
    PreCallRecordCmdClearImage(device_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
    lock.unlock();
    device_data->dispatch_table.CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
}
// Intercept for vkCmdClearDepthStencilImage: validate, record, then dispatch unlocked.
VKAPI_ATTR void VKAPI_CALL CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
                                                     const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
                                                     const VkImageSubresourceRange *pRanges) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    const bool skip = PreCallValidateCmdClearDepthStencilImage(device_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
    if (skip) return;
    PreCallRecordCmdClearImage(device_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
    lock.unlock();
    device_data->dispatch_table.CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
}
// Intercept for vkCmdResolveImage: validate, record, then dispatch unlocked.
VKAPI_ATTR void VKAPI_CALL CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                           VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                           const VkImageResolve *pRegions) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    auto cb_state = GetCBNode(device_data, commandBuffer);
    auto src_state = GetImageState(device_data, srcImage);
    auto dst_state = GetImageState(device_data, dstImage);
    const bool skip = PreCallValidateCmdResolveImage(device_data, cb_state, src_state, srcImageLayout, dst_state, dstImageLayout,
                                                     regionCount, pRegions);
    if (skip) return;
    PreCallRecordCmdResolveImage(device_data, cb_state, src_state, dst_state);
    lock.unlock();
    device_data->dispatch_table.CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
                                                pRegions);
}
// Intercept for vkGetImageSubresourceLayout: validate under lock, then pass through unlocked.
VKAPI_ATTR void VKAPI_CALL GetImageSubresourceLayout(VkDevice device, VkImage image, const VkImageSubresource *pSubresource,
                                                     VkSubresourceLayout *pLayout) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    const bool skip = PreCallValidateGetImageSubresourceLayout(dev_data, image, pSubresource);
    if (skip) return;
    lock.unlock();
    dev_data->dispatch_table.GetImageSubresourceLayout(device, image, pSubresource, pLayout);
}
// Record the stage mask for 'event' on both the command buffer and (if tracked) the queue.
// Always returns false (used as an eventUpdates callback).
bool SetEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_node = GetCBNode(device_data, commandBuffer);
    if (cb_node) {
        cb_node->eventToStageMap[event] = stageMask;
    }
    auto queue_it = device_data->queueMap.find(queue);
    if (queue_it != device_data->queueMap.end()) {
        queue_it->second.eventToStageMap[event] = stageMask;
    }
    return false;
}
static bool PreCallValidateCmdSetEvent(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineStageFlags stageMask) {
bool skip = ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdSetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdSetEvent-commandBuffer-cmdpool");
skip |= ValidateCmd(dev_data, cb_state, CMD_SETEVENT, "vkCmdSetEvent()");
skip |= InsideRenderPass(dev_data, cb_state, "vkCmdSetEvent()", "VUID-vkCmdSetEvent-renderpass");
skip |= ValidateStageMaskGsTsEnables(dev_data, stageMask, "vkCmdSetEvent()",
"VUID-vkCmdSetEvent-stageMask-01150",
"VUID-vkCmdSetEvent-stageMask-01151",
"VUID-vkCmdSetEvent-stageMask-02107",
"VUID-vkCmdSetEvent-stageMask-02108");
return skip;
}
static void PreCallRecordCmdSetEvent(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer, VkEvent event,
VkPipelineStageFlags stageMask) {
auto event_state = GetEventNode(dev_data, event);
if (event_state) {
AddCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(event), kVulkanObjectTypeEvent}, cb_state);
event_state->cb_bindings.insert(cb_state);
}
cb_state->events.push_back(event);
if (!cb_state->waitedEvents.count(event)) {
cb_state->writeEventsBeforeWait.push_back(event);
}
cb_state->eventUpdates.emplace_back([=](VkQueue q) { return SetEventStageMask(q, commandBuffer, event, stageMask); });
}
// Intercept for vkCmdSetEvent: validate and record under lock, then dispatch unlocked if validation passed.
VKAPI_ATTR void VKAPI_CALL CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = false;
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
    if (cb_state) {
        skip = PreCallValidateCmdSetEvent(device_data, cb_state, stageMask);
        PreCallRecordCmdSetEvent(device_data, cb_state, commandBuffer, event, stageMask);
    }
    lock.unlock();
    if (skip) return;
    device_data->dispatch_table.CmdSetEvent(commandBuffer, event, stageMask);
}
static bool PreCallValidateCmdResetEvent(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineStageFlags stageMask) {
bool skip = ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdResetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdResetEvent-commandBuffer-cmdpool");
skip |= ValidateCmd(dev_data, cb_state, CMD_RESETEVENT, "vkCmdResetEvent()");
skip |= InsideRenderPass(dev_data, cb_state, "vkCmdResetEvent()", "VUID-vkCmdResetEvent-renderpass");
skip |= ValidateStageMaskGsTsEnables(dev_data, stageMask, "vkCmdResetEvent()",
"VUID-vkCmdResetEvent-stageMask-01154",
"VUID-vkCmdResetEvent-stageMask-01155",
"VUID-vkCmdResetEvent-stageMask-02109",
"VUID-vkCmdResetEvent-stageMask-02110");
return skip;
}
static void PreCallRecordCmdResetEvent(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer,
VkEvent event) {
auto event_state = GetEventNode(dev_data, event);
if (event_state) {
AddCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(event), kVulkanObjectTypeEvent}, cb_state);
event_state->cb_bindings.insert(cb_state);
}
cb_state->events.push_back(event);
if (!cb_state->waitedEvents.count(event)) {
cb_state->writeEventsBeforeWait.push_back(event);
}
// TODO : Add check for "VUID-vkResetEvent-event-01148"
cb_state->eventUpdates.emplace_back(
[=](VkQueue q) { return SetEventStageMask(q, commandBuffer, event, VkPipelineStageFlags(0)); });
}
// Intercept for vkCmdResetEvent: validate and record under lock, then dispatch unlocked if validation passed.
VKAPI_ATTR void VKAPI_CALL CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = false;
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
    if (cb_state) {
        skip = PreCallValidateCmdResetEvent(device_data, cb_state, stageMask);
        PreCallRecordCmdResetEvent(device_data, cb_state, commandBuffer, event);
    }
    lock.unlock();
    if (skip) return;
    device_data->dispatch_table.CmdResetEvent(commandBuffer, event, stageMask);
}
// Return input pipeline stage flags, expanded for individual bits if VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT is set
static VkPipelineStageFlags ExpandPipelineStageFlags(VkPipelineStageFlags inflags) {
    if ((inflags & VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT) == 0) return inflags;
    // Replace the ALL_GRAPHICS meta-flag with the union of the individual graphics stages.
    const VkPipelineStageFlags all_graphics_stages =
        VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
        VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
        VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
        VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
        VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
        VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT | VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV |
        VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV | VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV;
    return (inflags & ~VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT) | all_graphics_stages;
}
// Verify image barrier image state and that the image is consistent with FB image
// For an image barrier issued inside a render pass instance:
//  - the barrier image must match one of the framebuffer's attachments,
//  - that attachment must be referenced by the active subpass,
//  - oldLayout must equal newLayout, and both must match the subpass's declared layout.
// Returns true if any validation error was logged.
static bool ValidateImageBarrierImage(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE const *cb_state,
                                      VkFramebuffer framebuffer, uint32_t active_subpass, const safe_VkSubpassDescription &sub_desc,
                                      uint64_t rp_handle, uint32_t img_index, const VkImageMemoryBarrier &img_barrier) {
    bool skip = false;
    const auto &fb_state = GetFramebufferState(device_data, framebuffer);
    assert(fb_state);
    const auto img_bar_image = img_barrier.image;
    bool image_match = false;
    bool sub_image_found = false;  // Do we find a corresponding subpass description
    VkImageLayout sub_image_layout = VK_IMAGE_LAYOUT_UNDEFINED;
    uint32_t attach_index = 0;
    // Verify that a framebuffer image matches barrier image
    const auto attachmentCount = fb_state->createInfo.attachmentCount;
    for (uint32_t attachment = 0; attachment < attachmentCount; ++attachment) {
        auto view_state = GetAttachmentImageViewState(device_data, fb_state, attachment);
        if (view_state && (img_bar_image == view_state->create_info.image)) {
            image_match = true;
            attach_index = attachment;
            break;
        }
    }
    if (image_match) {  // Make sure subpass is referring to matching attachment
        // Check depth/stencil first, then color and resolve attachments, recording the subpass's layout.
        if (sub_desc.pDepthStencilAttachment && sub_desc.pDepthStencilAttachment->attachment == attach_index) {
            sub_image_layout = sub_desc.pDepthStencilAttachment->layout;
            sub_image_found = true;
        } else {
            for (uint32_t j = 0; j < sub_desc.colorAttachmentCount; ++j) {
                if (sub_desc.pColorAttachments && sub_desc.pColorAttachments[j].attachment == attach_index) {
                    sub_image_layout = sub_desc.pColorAttachments[j].layout;
                    sub_image_found = true;
                    break;
                } else if (sub_desc.pResolveAttachments && sub_desc.pResolveAttachments[j].attachment == attach_index) {
                    sub_image_layout = sub_desc.pResolveAttachments[j].layout;
                    sub_image_found = true;
                    break;
                }
            }
        }
        // Image is in the framebuffer but not referenced by the active subpass.
        if (!sub_image_found) {
            skip |= log_msg(
                device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle,
                "VUID-vkCmdPipelineBarrier-image-01179",
                "%s: Barrier pImageMemoryBarriers[%d].image (0x%" PRIx64
                ") is not referenced by the VkSubpassDescription for active subpass (%d) of current renderPass (0x%" PRIx64 ").",
                funcName, img_index, HandleToUint64(img_bar_image), active_subpass, rp_handle);
        }
    } else {  // !image_match
        // Barrier image is not one of the framebuffer's attachment images at all.
        auto const fb_handle = HandleToUint64(fb_state->framebuffer);
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,
                        fb_handle, "VUID-vkCmdPipelineBarrier-image-01179",
                        "%s: Barrier pImageMemoryBarriers[%d].image (0x%" PRIx64
                        ") does not match an image from the current framebuffer (0x%" PRIx64 ").",
                        funcName, img_index, HandleToUint64(img_bar_image), fb_handle);
    }
    // Inside a render pass instance, an image barrier may not perform a layout transition.
    if (img_barrier.oldLayout != img_barrier.newLayout) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-oldLayout-01181",
                        "%s: As the Image Barrier for image 0x%" PRIx64
                        " is being executed within a render pass instance, oldLayout must equal newLayout yet they are %s and %s.",
                        funcName, HandleToUint64(img_barrier.image), string_VkImageLayout(img_barrier.oldLayout),
                        string_VkImageLayout(img_barrier.newLayout));
    } else {
        // Layouts match each other, but they must also match the layout the subpass declared for this attachment.
        if (sub_image_found && sub_image_layout != img_barrier.oldLayout) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            rp_handle, "VUID-vkCmdPipelineBarrier-oldLayout-01180",
                            "%s: Barrier pImageMemoryBarriers[%d].image (0x%" PRIx64
                            ") is referenced by the VkSubpassDescription for active subpass (%d) of current renderPass (0x%" PRIx64
                            ") as having layout %s, but image barrier has layout %s.",
                            funcName, img_index, HandleToUint64(img_bar_image), active_subpass, rp_handle,
                            string_VkImageLayout(sub_image_layout), string_VkImageLayout(img_barrier.oldLayout));
        }
    }
    return skip;
}
// Validate image barriers within a renderPass
// For each VkImageMemoryBarrier recorded inside a render pass instance, checks that:
//  - src/dstAccessMask are each a subset of the same self-dependency of the active subpass (VUID ...-pDependencies-02024)
//  - src/dstQueueFamilyIndex are both VK_QUEUE_FAMILY_IGNORED (no ownership transfer inside a render pass,
//    VUID ...-srcQueueFamilyIndex-01182)
//  - the barrier image matches a framebuffer attachment with the expected layout (delegated to
//    ValidateImageBarrierImage; deferred until vkCmdExecuteCommands time for secondary CBs recorded without a framebuffer)
static bool ValidateRenderPassImageBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE *cb_state,
                                            uint32_t active_subpass, const safe_VkSubpassDescription &sub_desc, uint64_t rp_handle,
                                            const VkSubpassDependency *dependencies, const std::vector<uint32_t> &self_dependencies,
                                            uint32_t image_mem_barrier_count, const VkImageMemoryBarrier *image_barriers) {
    bool skip = false;
    for (uint32_t i = 0; i < image_mem_barrier_count; ++i) {
        const auto &img_barrier = image_barriers[i];
        const auto &img_src_access_mask = img_barrier.srcAccessMask;
        const auto &img_dst_access_mask = img_barrier.dstAccessMask;
        // A match requires BOTH masks to be subsets of the SAME self-dependency entry
        bool access_mask_match = false;
        for (const auto self_dep_index : self_dependencies) {
            const auto &sub_dep = dependencies[self_dep_index];
            access_mask_match = (img_src_access_mask == (sub_dep.srcAccessMask & img_src_access_mask)) &&
                                (img_dst_access_mask == (sub_dep.dstAccessMask & img_dst_access_mask));
            if (access_mask_match) break;
        }
        if (!access_mask_match) {
            std::stringstream self_dep_ss;
            stream_join(self_dep_ss, ", ", self_dependencies);
            // Two messages (src and dst) are emitted under the same VUID so the app author sees both offending masks
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            rp_handle, "VUID-vkCmdPipelineBarrier-pDependencies-02024",
                            "%s: Barrier pImageMemoryBarriers[%d].srcAccessMask(0x%X) is not a subset of VkSubpassDependency "
                            "srcAccessMask of subpass %d of renderPass 0x%" PRIx64
                            ". Candidate VkSubpassDependency are pDependencies entries [%s].",
                            funcName, i, img_src_access_mask, active_subpass, rp_handle, self_dep_ss.str().c_str());
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            rp_handle, "VUID-vkCmdPipelineBarrier-pDependencies-02024",
                            "%s: Barrier pImageMemoryBarriers[%d].dstAccessMask(0x%X) is not a subset of VkSubpassDependency "
                            "dstAccessMask of subpass %d of renderPass 0x%" PRIx64
                            ". Candidate VkSubpassDependency are pDependencies entries [%s].",
                            funcName, i, img_dst_access_mask, active_subpass, rp_handle, self_dep_ss.str().c_str());
        }
        if (VK_QUEUE_FAMILY_IGNORED != img_barrier.srcQueueFamilyIndex ||
            VK_QUEUE_FAMILY_IGNORED != img_barrier.dstQueueFamilyIndex) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            rp_handle, "VUID-vkCmdPipelineBarrier-srcQueueFamilyIndex-01182",
                            "%s: Barrier pImageMemoryBarriers[%d].srcQueueFamilyIndex is %d and "
                            "pImageMemoryBarriers[%d].dstQueueFamilyIndex is %d but both must be VK_QUEUE_FAMILY_IGNORED.",
                            funcName, i, img_barrier.srcQueueFamilyIndex, i, img_barrier.dstQueueFamilyIndex);
        }
        // Secondary CBs can have null framebuffer so queue up validation in that case 'til FB is known
        if (VK_NULL_HANDLE == cb_state->activeFramebuffer) {
            assert(VK_COMMAND_BUFFER_LEVEL_SECONDARY == cb_state->createInfo.level);
            // Secondary CB case w/o FB specified delay validation
            // NOTE: [=] capture copies the object img_barrier refers to (capture-by-value of a reference
            // variable copies the referent), so the closure stays valid after the app's array is gone.
            cb_state->cmd_execute_commands_functions.emplace_back([=](GLOBAL_CB_NODE *primary_cb, VkFramebuffer fb) {
                return ValidateImageBarrierImage(device_data, funcName, cb_state, fb, active_subpass, sub_desc, rp_handle, i,
                                                 img_barrier);
            });
        } else {
            skip |= ValidateImageBarrierImage(device_data, funcName, cb_state, cb_state->activeFramebuffer, active_subpass,
                                              sub_desc, rp_handle, i, img_barrier);
        }
    }
    return skip;
}
// Validate VUs for Pipeline Barriers that are within a renderPass
// Pre: cb_state->activeRenderPass must be a pointer to valid renderPass state
// Checks, for the active subpass:
//  - at least one self-dependency exists (otherwise no barrier is legal at all)
//  - src/dst stage masks are subsets of the same self-dependency's (expanded) stage masks
//  - no buffer memory barriers are present (forbidden inside a render pass)
//  - each VkMemoryBarrier's access masks are subsets of the same self-dependency's access masks
//  - image barriers pass ValidateRenderPassImageBarriers
//  - dependencyFlags exactly equals some self-dependency's dependencyFlags
static bool ValidateRenderPassPipelineBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE *cb_state,
                                               VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask,
                                               VkDependencyFlags dependency_flags, uint32_t mem_barrier_count,
                                               const VkMemoryBarrier *mem_barriers, uint32_t buffer_mem_barrier_count,
                                               const VkBufferMemoryBarrier *buffer_mem_barriers, uint32_t image_mem_barrier_count,
                                               const VkImageMemoryBarrier *image_barriers) {
    bool skip = false;
    const auto rp_state = cb_state->activeRenderPass;
    const auto active_subpass = cb_state->activeSubpass;
    auto rp_handle = HandleToUint64(rp_state->renderPass);
    const auto &self_dependencies = rp_state->self_dependencies[active_subpass];
    const auto &dependencies = rp_state->createInfo.pDependencies;
    if (self_dependencies.size() == 0) {
        skip |=
            log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle,
                    "VUID-vkCmdPipelineBarrier-pDependencies-02024",
                    "%s: Barriers cannot be set during subpass %d of renderPass 0x%" PRIx64 " with no self-dependency specified.",
                    funcName, active_subpass, rp_handle);
    } else {
        // Grab ref to current subpassDescription up-front for use below
        const auto &sub_desc = rp_state->createInfo.pSubpasses[active_subpass];
        // Look for matching mask in any self-dependency
        bool stage_mask_match = false;
        for (const auto self_dep_index : self_dependencies) {
            const auto &sub_dep = dependencies[self_dep_index];
            // ExpandPipelineStageFlags returns by value; the const refs bind to (lifetime-extended) temporaries
            const auto &sub_src_stage_mask = ExpandPipelineStageFlags(sub_dep.srcStageMask);
            const auto &sub_dst_stage_mask = ExpandPipelineStageFlags(sub_dep.dstStageMask);
            // ALL_COMMANDS on the dependency side matches anything; otherwise require subset
            stage_mask_match = ((sub_src_stage_mask == VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) ||
                                (src_stage_mask == (sub_src_stage_mask & src_stage_mask))) &&
                               ((sub_dst_stage_mask == VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) ||
                                (dst_stage_mask == (sub_dst_stage_mask & dst_stage_mask)));
            if (stage_mask_match) break;
        }
        if (!stage_mask_match) {
            std::stringstream self_dep_ss;
            stream_join(self_dep_ss, ", ", self_dependencies);
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            rp_handle, "VUID-vkCmdPipelineBarrier-pDependencies-02024",
                            "%s: Barrier srcStageMask(0x%X) is not a subset of VkSubpassDependency srcStageMask of any "
                            "self-dependency of subpass %d of renderPass 0x%" PRIx64
                            " for which dstStageMask is also a subset. "
                            "Candidate VkSubpassDependency are pDependencies entries [%s].",
                            funcName, src_stage_mask, active_subpass, rp_handle, self_dep_ss.str().c_str());
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            rp_handle, "VUID-vkCmdPipelineBarrier-pDependencies-02024",
                            "%s: Barrier dstStageMask(0x%X) is not a subset of VkSubpassDependency dstStageMask of any "
                            "self-dependency of subpass %d of renderPass 0x%" PRIx64
                            " for which srcStageMask is also a subset. "
                            "Candidate VkSubpassDependency are pDependencies entries [%s].",
                            funcName, dst_stage_mask, active_subpass, rp_handle, self_dep_ss.str().c_str());
        }
        // Buffer memory barriers are never allowed inside a render pass instance
        if (0 != buffer_mem_barrier_count) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            rp_handle, "VUID-vkCmdPipelineBarrier-bufferMemoryBarrierCount-01178",
                            "%s: bufferMemoryBarrierCount is non-zero (%d) for subpass %d of renderPass 0x%" PRIx64 ".", funcName,
                            buffer_mem_barrier_count, active_subpass, rp_handle);
        }
        for (uint32_t i = 0; i < mem_barrier_count; ++i) {
            const auto &mb_src_access_mask = mem_barriers[i].srcAccessMask;
            const auto &mb_dst_access_mask = mem_barriers[i].dstAccessMask;
            // Both access masks must be subsets of the SAME self-dependency entry
            bool access_mask_match = false;
            for (const auto self_dep_index : self_dependencies) {
                const auto &sub_dep = dependencies[self_dep_index];
                access_mask_match = (mb_src_access_mask == (sub_dep.srcAccessMask & mb_src_access_mask)) &&
                                    (mb_dst_access_mask == (sub_dep.dstAccessMask & mb_dst_access_mask));
                if (access_mask_match) break;
            }
            if (!access_mask_match) {
                std::stringstream self_dep_ss;
                stream_join(self_dep_ss, ", ", self_dependencies);
                skip |= log_msg(
                    device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle,
                    "VUID-vkCmdPipelineBarrier-pDependencies-02024",
                    "%s: Barrier pMemoryBarriers[%d].srcAccessMask(0x%X) is not a subset of VkSubpassDependency srcAccessMask "
                    "for any self-dependency of subpass %d of renderPass 0x%" PRIx64
                    " for which dstAccessMask is also a subset. "
                    "Candidate VkSubpassDependency are pDependencies entries [%s].",
                    funcName, i, mb_src_access_mask, active_subpass, rp_handle, self_dep_ss.str().c_str());
                skip |= log_msg(
                    device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle,
                    "VUID-vkCmdPipelineBarrier-pDependencies-02024",
                    "%s: Barrier pMemoryBarriers[%d].dstAccessMask(0x%X) is not a subset of VkSubpassDependency dstAccessMask "
                    "for any self-dependency of subpass %d of renderPass 0x%" PRIx64
                    " for which srcAccessMask is also a subset. "
                    "Candidate VkSubpassDependency are pDependencies entries [%s].",
                    funcName, i, mb_dst_access_mask, active_subpass, rp_handle, self_dep_ss.str().c_str());
            }
        }
        skip |= ValidateRenderPassImageBarriers(device_data, funcName, cb_state, active_subpass, sub_desc, rp_handle, dependencies,
                                                self_dependencies, image_mem_barrier_count, image_barriers);
        // dependencyFlags must EXACTLY equal some self-dependency's flags (not a subset relation)
        bool flag_match = false;
        for (const auto self_dep_index : self_dependencies) {
            const auto &sub_dep = dependencies[self_dep_index];
            flag_match = sub_dep.dependencyFlags == dependency_flags;
            if (flag_match) break;
        }
        if (!flag_match) {
            std::stringstream self_dep_ss;
            stream_join(self_dep_ss, ", ", self_dependencies);
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            rp_handle, "VUID-vkCmdPipelineBarrier-pDependencies-02024",
                            "%s: dependencyFlags param (0x%X) does not equal VkSubpassDependency dependencyFlags value for any "
                            "self-dependency of subpass %d of renderPass 0x%" PRIx64
                            ". Candidate VkSubpassDependency are pDependencies entries [%s].",
                            funcName, dependency_flags, cb_state->activeSubpass, rp_handle, self_dep_ss.str().c_str());
        }
    }
    return skip;
}
// Array to mask individual accessMask to corresponding stageMask
// accessMask active bit position (0-31) maps to index
// Notes:
//  - VK_ACCESS_FLAG_BITS_MAX_ENUM entries act as an "always match" sentinel (all bits set, so the
//    AND against any stage mask is non-zero).
//  - Index entries 19 and 20 are 0, meaning any access flag occupying those bit positions can never
//    match a stage and is reported as unsupported. NOTE(review): confirm no in-use extension access
//    bit maps to position 19/20 in this header version.
//  - The table only covers bit positions 0-23; callers must not index beyond that.
const static VkPipelineStageFlags AccessMaskToPipeStage[24] = {
    // VK_ACCESS_INDIRECT_COMMAND_READ_BIT = 0
    VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
    // VK_ACCESS_INDEX_READ_BIT = 1
    VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
    // VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT = 2
    VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
    // VK_ACCESS_UNIFORM_READ_BIT = 3
    VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
        VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
        VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT |
        VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV | VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV | VK_PIPELINE_STAGE_RAYTRACING_BIT_NVX,
    // VK_ACCESS_INPUT_ATTACHMENT_READ_BIT = 4
    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
    // VK_ACCESS_SHADER_READ_BIT = 5
    VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
        VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
        VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT |
        VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV | VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV | VK_PIPELINE_STAGE_RAYTRACING_BIT_NVX,
    // VK_ACCESS_SHADER_WRITE_BIT = 6
    VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
        VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
        VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT |
        VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV | VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV | VK_PIPELINE_STAGE_RAYTRACING_BIT_NVX,
    // VK_ACCESS_COLOR_ATTACHMENT_READ_BIT = 7
    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
    // VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT = 8
    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
    // VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 9
    VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
    // VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 10
    VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
    // VK_ACCESS_TRANSFER_READ_BIT = 11
    VK_PIPELINE_STAGE_TRANSFER_BIT,
    // VK_ACCESS_TRANSFER_WRITE_BIT = 12
    VK_PIPELINE_STAGE_TRANSFER_BIT,
    // VK_ACCESS_HOST_READ_BIT = 13
    VK_PIPELINE_STAGE_HOST_BIT,
    // VK_ACCESS_HOST_WRITE_BIT = 14
    VK_PIPELINE_STAGE_HOST_BIT,
    // VK_ACCESS_MEMORY_READ_BIT = 15
    VK_ACCESS_FLAG_BITS_MAX_ENUM,  // Always match
    // VK_ACCESS_MEMORY_WRITE_BIT = 16
    VK_ACCESS_FLAG_BITS_MAX_ENUM,  // Always match
    // VK_ACCESS_COMMAND_PROCESS_READ_BIT_NVX = 17
    VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
    // VK_ACCESS_COMMAND_PROCESS_WRITE_BIT_NVX = 18
    VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
    // 19 -- unmapped bit position: always fails the stage check
    0,
    // 20 -- unmapped bit position: always fails the stage check
    0,
    // VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NVX = 21
    VK_PIPELINE_STAGE_RAYTRACING_BIT_NVX,
    // VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NVX = 22
    VK_PIPELINE_STAGE_RAYTRACING_BIT_NVX,
    // VK_ACCESS_SHADING_RATE_IMAGE_READ_BIT_NV = 23
    VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV,
};
// Verify that all bits of access_mask are supported by the src_stage_mask
// Returns true when every set bit in access_mask is compatible with the (expanded) stage_mask per
// the AccessMaskToPipeStage table; returns false at the first unsupported bit.
static bool ValidateAccessMaskPipelineStage(VkAccessFlags access_mask, VkPipelineStageFlags stage_mask) {
    // Early out if all commands set, or access_mask NULL
    if ((stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) || (0 == access_mask)) return true;
    stage_mask = ExpandPipelineStageFlags(stage_mask);
    const int table_size = static_cast<int>(sizeof(AccessMaskToPipeStage) / sizeof(AccessMaskToPipeStage[0]));
    int index = 0;
    // for each of the set bits in access_mask, make sure that supporting stage mask bit(s) are set
    while (access_mask) {
        index = (u_ffs(access_mask) - 1);
        assert(index >= 0);
        // Fix: guard against access-flag bits beyond the table (e.g. from a newer extension). The
        // previous code read AccessMaskToPipeStage[] out of bounds for any bit position >= 24.
        // u_ffs returns the lowest set bit, so once one bit is out of range all remaining ones are
        // too; treat unknown bits as supported rather than emitting a bogus error or reading OOB.
        if (index >= table_size) break;
        // Must have "!= 0" compare to prevent warning from MSVC
        if ((AccessMaskToPipeStage[index] & stage_mask) == 0) return false;  // early out
        access_mask &= ~(1u << index);  // Mask off bit that's been checked (unsigned literal: 1 << 31 on int is UB)
    }
    return true;
}
namespace barrier_queue_families {
// Indexes into vu_summary / image_error_codes / buffer_error_codes below -- keep all three in sync.
enum VuIndex {
    kSrcOrDstMustBeIgnore,
    kSpecialOrIgnoreOnly,
    kSrcIgnoreRequiresDstIgnore,
    kDstValidOrSpecialIfNotIgnore,
    kSrcValidOrSpecialIfNotIgnore,
    kSrcAndDestMustBeIgnore,
    kBothIgnoreOrBothValid,
    kSubmitQueueMustMatchSrcOrDst
};
// One-line human-readable summaries appended to each queue-family error message, indexed by VuIndex.
static const char *vu_summary[] = {"Source or destination queue family must be ignored.",
                                   "Source or destination queue family must be special or ignored.",
                                   "Destination queue family must be ignored if source queue family is.",
                                   "Destination queue family must be valid, ignored, or special.",
                                   "Source queue family must be valid, ignored, or special.",
                                   "Source and destination queue family must both be ignored.",
                                   "Source and destination queue family must both be ignore or both valid.",
                                   "Source or destination queue family must match submit queue family, if not ignored."};
// VUID strings for VkImageMemoryBarrier queue-family checks, indexed by VuIndex.
static const std::string image_error_codes[] = {
    "VUID-VkImageMemoryBarrier-image-01381",  //   kSrcOrDstMustBeIgnore
    "VUID-VkImageMemoryBarrier-image-01766",  //   kSpecialOrIgnoreOnly
    "VUID-VkImageMemoryBarrier-image-01201",  //   kSrcIgnoreRequiresDstIgnore
    "VUID-VkImageMemoryBarrier-image-01768",  //   kDstValidOrSpecialIfNotIgnore
    "VUID-VkImageMemoryBarrier-image-01767",  //   kSrcValidOrSpecialIfNotIgnore
    "VUID-VkImageMemoryBarrier-image-01199",  //   kSrcAndDestMustBeIgnore
    "VUID-VkImageMemoryBarrier-image-01200",  //   kBothIgnoreOrBothValid
    "VUID-VkImageMemoryBarrier-image-01205",  //   kSubmitQueueMustMatchSrcOrDst
};
// VUID strings for VkBufferMemoryBarrier queue-family checks, indexed by VuIndex.
static const std::string buffer_error_codes[] = {
    "VUID-VkBufferMemoryBarrier-buffer-01191",  //  kSrcOrDstMustBeIgnore
    "VUID-VkBufferMemoryBarrier-buffer-01763",  //  kSpecialOrIgnoreOnly
    "VUID-VkBufferMemoryBarrier-buffer-01193",  //  kSrcIgnoreRequiresDstIgnore
    "VUID-VkBufferMemoryBarrier-buffer-01765",  //  kDstValidOrSpecialIfNotIgnore
    "VUID-VkBufferMemoryBarrier-buffer-01764",  //  kSrcValidOrSpecialIfNotIgnore
    "VUID-VkBufferMemoryBarrier-buffer-01190",  //  kSrcAndDestMustBeIgnore
    "VUID-VkBufferMemoryBarrier-buffer-01192",  //  kBothIgnoreOrBothValid
    "VUID-VkBufferMemoryBarrier-buffer-01196",  //  kSubmitQueueMustMatchSrcOrDst
};
// Gathers the per-barrier data needed to format queue-family-ownership error messages, independent
// of whether the barrier is for an image or a buffer. Copyable by design: instances are captured by
// value into submit-time validation closures (see ValidateAtQueueSubmit).
class ValidatorState {
   public:
    // Generic constructor: caller supplies the barrier's object handle, sharing mode, object type,
    // and the VuIndex-indexed VUID table (image_error_codes or buffer_error_codes).
    ValidatorState(const layer_data *device_data, const char *func_name, const GLOBAL_CB_NODE *cb_state,
                   const uint64_t barrier_handle64, const VkSharingMode sharing_mode, const VulkanObjectType object_type,
                   const std::string *val_codes)
        : report_data_(device_data->report_data),
          func_name_(func_name),
          cb_handle64_(HandleToUint64(cb_state->commandBuffer)),
          barrier_handle64_(barrier_handle64),
          sharing_mode_(sharing_mode),
          object_type_(object_type),
          val_codes_(val_codes),
          limit_(static_cast<uint32_t>(device_data->phys_dev_properties.queue_family_properties.size())),
          mem_ext_(device_data->extensions.vk_khr_external_memory) {}

    // Create a validator state from an image state... reducing the image specific to the generic version.
    ValidatorState(const layer_data *device_data, const char *func_name, const GLOBAL_CB_NODE *cb_state,
                   const VkImageMemoryBarrier *barrier, const IMAGE_STATE *state)
        : ValidatorState(device_data, func_name, cb_state, HandleToUint64(barrier->image), state->createInfo.sharingMode,
                         kVulkanObjectTypeImage, image_error_codes) {}

    // Create a validator state from a buffer state... reducing the buffer specific to the generic version.
    // Fix: was kVulkanObjectTypeImage (copy/paste from the image constructor above), which made
    // GetTypeString() label buffer barriers as images in the error messages.
    ValidatorState(const layer_data *device_data, const char *func_name, const GLOBAL_CB_NODE *cb_state,
                   const VkBufferMemoryBarrier *barrier, const BUFFER_STATE *state)
        : ValidatorState(device_data, func_name, cb_state, HandleToUint64(barrier->buffer), state->createInfo.sharingMode,
                         kVulkanObjectTypeBuffer, buffer_error_codes) {}

    // Log the messages using boilerplate from object state, and Vu specific information from the template arg
    // One and two family versions, in the single family version, Vu holds the name of the passed parameter
    bool LogMsg(VuIndex vu_index, uint32_t family, const char *param_name) const {
        const std::string val_code = val_codes_[vu_index];
        const char *annotation = GetFamilyAnnotation(family);
        return log_msg(report_data_, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, cb_handle64_,
                       val_code, "%s: Barrier using %s 0x%" PRIx64 " created with sharingMode %s, has %s %u%s. %s", func_name_,
                       GetTypeString(), barrier_handle64_, GetModeString(), param_name, family, annotation, vu_summary[vu_index]);
    }
    bool LogMsg(VuIndex vu_index, uint32_t src_family, uint32_t dst_family) const {
        const std::string val_code = val_codes_[vu_index];
        const char *src_annotation = GetFamilyAnnotation(src_family);
        const char *dst_annotation = GetFamilyAnnotation(dst_family);
        return log_msg(report_data_, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, cb_handle64_,
                       val_code,
                       "%s: Barrier using %s 0x%" PRIx64
                       " created with sharingMode %s, has srcQueueFamilyIndex %u%s and dstQueueFamilyIndex %u%s. %s",
                       func_name_, GetTypeString(), barrier_handle64_, GetModeString(), src_family, src_annotation, dst_family,
                       dst_annotation, vu_summary[vu_index]);
    }

    // This abstract Vu can only be tested at submit time, thus we need a callback from the closure containing the needed
    // data. Note that the mem_barrier is copied to the closure as the lambda lifespan exceed the guarantees of validity for
    // application input.
    static bool ValidateAtQueueSubmit(const VkQueue queue, const layer_data *device_data, uint32_t src_family, uint32_t dst_family,
                                      const ValidatorState &val) {
        auto queue_data_it = device_data->queueMap.find(queue);
        if (queue_data_it == device_data->queueMap.end()) return false;

        uint32_t queue_family = queue_data_it->second.queueFamilyIndex;
        if ((src_family != queue_family) && (dst_family != queue_family)) {
            const std::string val_code = val.val_codes_[kSubmitQueueMustMatchSrcOrDst];
            const char *src_annotation = val.GetFamilyAnnotation(src_family);
            const char *dst_annotation = val.GetFamilyAnnotation(dst_family);
            return log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
                           HandleToUint64(queue), val_code,
                           "%s: Barrier submitted to queue with family index %u, using %s 0x%" PRIx64
                           " created with sharingMode %s, has srcQueueFamilyIndex %u%s and dstQueueFamilyIndex %u%s. %s",
                           "vkQueueSubmit", queue_family, val.GetTypeString(), val.barrier_handle64_, val.GetModeString(),
                           src_family, src_annotation, dst_family, dst_annotation, vu_summary[kSubmitQueueMustMatchSrcOrDst]);
        }
        return false;
    }
    // Logical helpers for semantic clarity
    inline bool KhrExternalMem() const { return mem_ext_; }
    inline bool IsValid(uint32_t queue_family) const { return (queue_family < limit_); }
    inline bool IsValidOrSpecial(uint32_t queue_family) const {
        return IsValid(queue_family) || (mem_ext_ && IsSpecial(queue_family));
    }
    inline bool IsIgnored(uint32_t queue_family) const { return queue_family == VK_QUEUE_FAMILY_IGNORED; }

    // Helpers for LogMsg (and log_msg)
    const char *GetModeString() const { return string_VkSharingMode(sharing_mode_); }

    // Descriptive text for the various types of queue family index
    const char *GetFamilyAnnotation(uint32_t family) const {
        const char *external = " (VK_QUEUE_FAMILY_EXTERNAL_KHR)";
        const char *foreign = " (VK_QUEUE_FAMILY_FOREIGN_EXT)";
        const char *ignored = " (VK_QUEUE_FAMILY_IGNORED)";
        const char *valid = " (VALID)";
        const char *invalid = " (INVALID)";
        switch (family) {
            case VK_QUEUE_FAMILY_EXTERNAL_KHR:
                return external;
            case VK_QUEUE_FAMILY_FOREIGN_EXT:
                return foreign;
            case VK_QUEUE_FAMILY_IGNORED:
                return ignored;
            default:
                if (IsValid(family)) {
                    return valid;
                }
                return invalid;
        };
    }
    const char *GetTypeString() const { return object_string[object_type_]; }
    VkSharingMode GetSharingMode() const { return sharing_mode_; }

   protected:
    const debug_report_data *const report_data_;
    const char *const func_name_;
    const uint64_t cb_handle64_;
    const uint64_t barrier_handle64_;
    const VkSharingMode sharing_mode_;
    const VulkanObjectType object_type_;
    const std::string *val_codes_;   // VuIndex-indexed VUID table (image_error_codes or buffer_error_codes)
    const uint32_t limit_;           // number of queue families on the physical device
    const bool mem_ext_;             // VK_KHR_external_memory enabled (permits the "special" families)
};
// Validate the src/dst queue family indices of a single barrier against the resource's sharing mode,
// the device's queue family count, and whether VK_KHR_external_memory is enabled. The exclusive-mode
// ownership-transfer check that needs the submitting queue is deferred to submit time via a closure.
bool Validate(const layer_data *device_data, const char *func_name, GLOBAL_CB_NODE *cb_state, const ValidatorState &val,
              const uint32_t src_queue_family, const uint32_t dst_queue_family) {
    bool skip = false;
    const bool concurrent = (VK_SHARING_MODE_CONCURRENT == val.GetSharingMode());
    const bool ignore_src = val.IsIgnored(src_queue_family);
    const bool ignore_dst = val.IsIgnored(dst_queue_family);

    if (!val.KhrExternalMem()) {
        // Core rules only (no external memory extension)
        if (concurrent) {
            // Concurrent sharing: both families must be VK_QUEUE_FAMILY_IGNORED
            if (!ignore_src || !ignore_dst) {
                skip |= val.LogMsg(kSrcAndDestMustBeIgnore, src_queue_family, dst_queue_family);
            }
        } else {
            // VK_SHARING_MODE_EXCLUSIVE: both ignored, or both valid device queue families
            const bool both_ignored = ignore_src && ignore_dst;
            const bool both_valid = val.IsValid(src_queue_family) && val.IsValid(dst_queue_family);
            if (!both_ignored && !both_valid) {
                skip |= val.LogMsg(kBothIgnoreOrBothValid, src_queue_family, dst_queue_family);
            }
        }
    } else {
        // VK_KHR_external_memory relaxes the rules and admits the "special" families
        if (concurrent) {
            // At least one side must be ignored...
            if (!ignore_src && !ignore_dst) {
                skip |= val.LogMsg(kSrcOrDstMustBeIgnore, src_queue_family, dst_queue_family);
            }
            // ...and a lone non-ignored side must be one of the special families
            const bool dst_not_special = ignore_src && !(ignore_dst || IsSpecial(dst_queue_family));
            const bool src_not_special = ignore_dst && !(ignore_src || IsSpecial(src_queue_family));
            if (dst_not_special || src_not_special) {
                skip |= val.LogMsg(kSpecialOrIgnoreOnly, src_queue_family, dst_queue_family);
            }
        } else {
            // VK_SHARING_MODE_EXCLUSIVE
            if (ignore_src && !ignore_dst) {
                skip |= val.LogMsg(kSrcIgnoreRequiresDstIgnore, src_queue_family, dst_queue_family);
            }
            if (!ignore_dst && !val.IsValidOrSpecial(dst_queue_family)) {
                skip |= val.LogMsg(kDstValidOrSpecialIfNotIgnore, dst_queue_family, "dstQueueFamilyIndex");
            }
            if (!ignore_src && !val.IsValidOrSpecial(src_queue_family)) {
                skip |= val.LogMsg(kSrcValidOrSpecialIfNotIgnore, src_queue_family, "srcQueueFamilyIndex");
            }
        }
    }

    if (!concurrent && !ignore_src && !ignore_dst) {
        // Only enqueue submit time check if it is needed. If more submit time checks are added, change the criteria
        // TODO create a better named list, or rename the submit time lists to something that matches the broader usage...
        // Note: if we want to create a semantic that separates state lookup, validation, and state update this should go
        // to a local queue of update_state_actions or something.
        cb_state->eventUpdates.emplace_back([device_data, src_queue_family, dst_queue_family, val](VkQueue queue) {
            return ValidatorState::ValidateAtQueueSubmit(queue, device_data, src_queue_family, dst_queue_family, val);
        });
    }
    return skip;
}
} // namespace barrier_queue_families
// Type specific wrapper for image barriers
bool ValidateBarrierQueueFamilies(const layer_data *device_data, const char *func_name, GLOBAL_CB_NODE *cb_state,
const VkImageMemoryBarrier *barrier, const IMAGE_STATE *state_data) {
// State data is required
if (!state_data) {
return false;
}
// Create the validator state from the image state
barrier_queue_families::ValidatorState val(device_data, func_name, cb_state, barrier, state_data);
const uint32_t src_queue_family = barrier->srcQueueFamilyIndex;
const uint32_t dst_queue_family = barrier->dstQueueFamilyIndex;
return barrier_queue_families::Validate(device_data, func_name, cb_state, val, src_queue_family, dst_queue_family);
}
// Type specific wrapper for buffer barriers
bool ValidateBarrierQueueFamilies(const layer_data *device_data, const char *func_name, GLOBAL_CB_NODE *cb_state,
const VkBufferMemoryBarrier *barrier, const BUFFER_STATE *state_data) {
// State data is required
if (!state_data) {
return false;
}
// Create the validator state from the buffer state
barrier_queue_families::ValidatorState val(device_data, func_name, cb_state, barrier, state_data);
const uint32_t src_queue_family = barrier->srcQueueFamilyIndex;
const uint32_t dst_queue_family = barrier->dstQueueFamilyIndex;
return barrier_queue_families::Validate(device_data, func_name, cb_state, val, src_queue_family, dst_queue_family);
}
// Validate barriers recorded OUTSIDE a render pass instance:
//  - every memory/buffer/image barrier's src/dstAccessMask must be supported by the corresponding stage mask
//  - image/buffer barriers: queue-family ownership rules, bound memory, layout/subresource-range checks
//  - QFO (queue family ownership) transfer uniqueness across the command buffer
// NOTE(review): the access-mask checks for image and buffer barriers reuse the pMemoryBarriers VUIDs
// (01184/01185) -- presumably intentional since the VU text covers all three arrays; confirm against the spec.
static bool ValidateBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE *cb_state,
                             VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask, uint32_t memBarrierCount,
                             const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
                             const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
                             const VkImageMemoryBarrier *pImageMemBarriers) {
    bool skip = false;
    for (uint32_t i = 0; i < memBarrierCount; ++i) {
        const auto &mem_barrier = pMemBarriers[i];
        if (!ValidateAccessMaskPipelineStage(mem_barrier.srcAccessMask, src_stage_mask)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01184",
                            "%s: pMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i,
                            mem_barrier.srcAccessMask, src_stage_mask);
        }
        if (!ValidateAccessMaskPipelineStage(mem_barrier.dstAccessMask, dst_stage_mask)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01185",
                            "%s: pMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i,
                            mem_barrier.dstAccessMask, dst_stage_mask);
        }
    }
    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
        auto mem_barrier = &pImageMemBarriers[i];
        if (!ValidateAccessMaskPipelineStage(mem_barrier->srcAccessMask, src_stage_mask)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01184",
                            "%s: pImageMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i,
                            mem_barrier->srcAccessMask, src_stage_mask);
        }
        if (!ValidateAccessMaskPipelineStage(mem_barrier->dstAccessMask, dst_stage_mask)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01185",
                            "%s: pImageMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i,
                            mem_barrier->dstAccessMask, dst_stage_mask);
        }
        auto image_data = GetImageState(device_data, mem_barrier->image);
        // Queue-family checks run even with null image_data (they early-out internally)
        skip |= ValidateBarrierQueueFamilies(device_data, funcName, cb_state, mem_barrier, image_data);
        if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), "VUID-VkImageMemoryBarrier-newLayout-01198",
                            "%s: Image Layout cannot be transitioned to UNDEFINED or PREINITIALIZED.", funcName);
        }
        if (image_data) {
            // There is no VUID for this, but there is blanket text:
            //     "Non-sparse resources must be bound completely and contiguously to a single VkDeviceMemory object before
            //     recording commands in a command buffer."
            // TODO: Update this when VUID is defined
            skip |= ValidateMemoryIsBoundToImage(device_data, image_data, funcName, kVUIDUndefined);

            auto aspect_mask = mem_barrier->subresourceRange.aspectMask;
            skip |= ValidateImageAspectMask(device_data, image_data->image, image_data->createInfo.format, aspect_mask, funcName);

            std::string param_name = "pImageMemoryBarriers[" + std::to_string(i) + "].subresourceRange";
            skip |= ValidateImageBarrierSubresourceRange(device_data, image_data, mem_barrier->subresourceRange, funcName,
                                                         param_name.c_str());
        }
    }

    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
        auto mem_barrier = &pBufferMemBarriers[i];
        // Defensive null check -- a pointer into the caller's array should never be null here
        if (!mem_barrier) continue;

        if (!ValidateAccessMaskPipelineStage(mem_barrier->srcAccessMask, src_stage_mask)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01184",
                            "%s: pBufferMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i,
                            mem_barrier->srcAccessMask, src_stage_mask);
        }
        if (!ValidateAccessMaskPipelineStage(mem_barrier->dstAccessMask, dst_stage_mask)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01185",
                            "%s: pBufferMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i,
                            mem_barrier->dstAccessMask, dst_stage_mask);
        }
        // Validate buffer barrier queue family indices
        auto buffer_state = GetBufferState(device_data, mem_barrier->buffer);
        skip |= ValidateBarrierQueueFamilies(device_data, funcName, cb_state, mem_barrier, buffer_state);
        if (buffer_state) {
            // There is no VUID for this, but there is blanket text:
            //     "Non-sparse resources must be bound completely and contiguously to a single VkDeviceMemory object before
            //     recording commands in a command buffer"
            // TODO: Update this when VUID is defined
            skip |= ValidateMemoryIsBoundToBuffer(device_data, buffer_state, funcName, kVUIDUndefined);

            // offset must lie inside the buffer; offset+size (when not WHOLE_SIZE) must not overflow it
            auto buffer_size = buffer_state->createInfo.size;
            if (mem_barrier->offset >= buffer_size) {
                skip |= log_msg(
                    device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    HandleToUint64(cb_state->commandBuffer), "VUID-VkBufferMemoryBarrier-offset-01187",
                    "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".",
                    funcName, HandleToUint64(mem_barrier->buffer), HandleToUint64(mem_barrier->offset),
                    HandleToUint64(buffer_size));
            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
                skip |=
                    log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), "VUID-VkBufferMemoryBarrier-size-01189",
                            "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
                            " whose sum is greater than total size 0x%" PRIx64 ".",
                            funcName, HandleToUint64(mem_barrier->buffer), HandleToUint64(mem_barrier->offset),
                            HandleToUint64(mem_barrier->size), HandleToUint64(buffer_size));
            }
        }
    }

    skip |= ValidateBarriersQFOTransferUniqueness(device_data, funcName, cb_state, bufferBarrierCount, pBufferMemBarriers,
                                                  imageMemBarrierCount, pImageMemBarriers);

    return skip;
}
// Deferred (queue-submit-time) check that the srcStageMask passed to vkCmdWaitEvents
// matches the accumulated stage masks of the events being waited on.
// Returns true if a validation error was logged.
bool ValidateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex,
                            VkPipelineStageFlags sourceStageMask) {
    bool skip = false;
    VkPipelineStageFlags stageMask = 0;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    // The queue lookup is loop-invariant, so hoist it out of the per-event loop.
    // Preserve the original behavior exactly: the early bail-out only occurred
    // when at least one event was actually examined.
    auto queue_data = dev_data->queueMap.find(queue);
    if ((eventCount > 0) && (queue_data == dev_data->queueMap.end())) return false;
    for (uint32_t i = 0; i < eventCount; ++i) {
        auto event = pCB->events[firstEventIndex + i];
        // Prefer the per-queue stage recorded by vkCmdSetEvent on this queue;
        // fall back to the event object's global stage mask.
        auto event_data = queue_data->second.eventToStageMap.find(event);
        if (event_data != queue_data->second.eventToStageMap.end()) {
            stageMask |= event_data->second;
        } else {
            auto global_event_data = GetEventNode(dev_data, event);
            if (!global_event_data) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                                HandleToUint64(event), kVUID_Core_DrawState_InvalidEvent,
                                "Event 0x%" PRIx64 " cannot be waited on if it has never been set.", HandleToUint64(event));
            } else {
                stageMask |= global_event_data->stageMask;
            }
        }
    }
    // TODO: Need to validate that host_bit is only set if set event is called
    // but set event can be called at any time.
    if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(pCB->commandBuffer), "VUID-vkCmdWaitEvents-srcStageMask-parameter",
                        "Submitting cmdbuffer with call to VkCmdWaitEvents using srcStageMask 0x%X which must be the bitwise OR of "
                        "the stageMask parameters used in calls to vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if used with "
                        "vkSetEvent but instead is 0x%X.",
                        sourceStageMask, stageMask);
    }
    return skip;
}
// Note that we only check bits that HAVE required queueflags -- don't care entries are skipped
// Maps each pipeline stage bit to the queue capability flags that must be present on the
// command buffer's queue family for that stage to be legal in a barrier/wait stage mask.
static std::unordered_map<VkPipelineStageFlags, VkQueueFlags> supported_pipeline_stages_table = {
    {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
    {VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
    {VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_QUEUE_COMPUTE_BIT},
    {VK_PIPELINE_STAGE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT},
    {VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_QUEUE_GRAPHICS_BIT}};
// Iteration order for checking individual stage bits against the table above.
// NOTE(review): every entry here must also appear as a key in
// supported_pipeline_stages_table; CheckStageMaskQueueCompatibility looks the
// bit up with operator[], which would silently insert a zero entry otherwise.
static const VkPipelineStageFlags stage_flag_bit_array[] = {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
                                                            VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
                                                            VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                                                            VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
                                                            VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
                                                            VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                                                            VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_TRANSFER_BIT,
                                                            VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT};
// Verify that every pipeline stage bit set in stage_mask is supported by the queue
// capabilities (queue_flags) of the command buffer's queue family. Logs one error
// per incompatible stage bit; returns true if anything was logged.
bool CheckStageMaskQueueCompatibility(layer_data *dev_data, VkCommandBuffer command_buffer, VkPipelineStageFlags stage_mask,
                                      VkQueueFlags queue_flags, const char *function, const char *src_or_dest,
                                      std::string error_code) {
    bool skip = false;
    // Walk each known stage bit; bits absent from stage_mask are irrelevant.
    for (const auto &stage_bit : stage_flag_bit_array) {
        if ((stage_mask & stage_bit) == 0) continue;
        // The table gives the queue capabilities required for this stage bit.
        const VkQueueFlags required_flags = supported_pipeline_stages_table[stage_bit];
        if ((required_flags & queue_flags) != 0) continue;
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(command_buffer), error_code,
                        "%s(): %s flag %s is not compatible with the queue family properties of this command buffer.",
                        function, src_or_dest, string_VkPipelineStageFlagBits(static_cast<VkPipelineStageFlagBits>(stage_bit)));
    }
    return skip;
}
// Check if all barriers are of a given operation type.
// Returns false when pool is null; vacuously true when count == 0.
template <typename Barrier, typename OpCheck>
static bool AllTransferOp(const COMMAND_POOL_NODE *pool, OpCheck &op_check, uint32_t count, const Barrier *barriers) {
    if (pool == nullptr) return false;
    bool all_match = true;
    // Stop as soon as one barrier fails the predicate.
    for (uint32_t index = 0; all_match && (index < count); ++index) {
        all_match = op_check(pool, barriers + index);
    }
    return all_match;
}
// Classification of a set of memory barriers by queue-family-ownership operation,
// used to relax stage-mask/queue-capability checks for pure transfer operations.
enum BarrierOperationsType {
    kAllAcquire,  // All Barrier operations are "ownership acquire" operations
    kAllRelease,  // All Barrier operations are "ownership release" operations
    kGeneral,     // Either no ownership operations or a mix of ownership operation types and/or non-ownership operations
};
// Classify the supplied buffer/image barriers: all queue-ownership releases, all
// acquires, or general. The result relaxes queue-capability validation of the
// src/dst stage masks (see ValidateStageMasksAgainstQueueCapabilities).
BarrierOperationsType ComputeBarrierOperationsType(layer_data *device_data, GLOBAL_CB_NODE *cb_state, uint32_t buffer_barrier_count,
                                                   const VkBufferMemoryBarrier *buffer_barriers, uint32_t image_barrier_count,
                                                   const VkImageMemoryBarrier *image_barriers) {
    auto pool = GetCommandPoolNode(device_data, cb_state->createInfo.commandPool);
    // With no barriers at all there is nothing to classify.
    // Note: AllTransferOp returns true for count == 0, so the explicit count
    // check below keeps the empty case classified as kGeneral.
    if ((buffer_barrier_count + image_barrier_count) == 0) {
        return kGeneral;
    }
    if (AllTransferOp(pool, IsReleaseOp<VkBufferMemoryBarrier>, buffer_barrier_count, buffer_barriers) &&
        AllTransferOp(pool, IsReleaseOp<VkImageMemoryBarrier>, image_barrier_count, image_barriers)) {
        return kAllRelease;
    }
    if (AllTransferOp(pool, IsAcquireOp<VkBufferMemoryBarrier>, buffer_barrier_count, buffer_barriers) &&
        AllTransferOp(pool, IsAcquireOp<VkImageMemoryBarrier>, image_barrier_count, image_barriers)) {
        return kAllAcquire;
    }
    return kGeneral;
}
// Validate that the src/dst stage masks of a barrier or wait are compatible with the
// capabilities of the queue family the command buffer's pool was created for.
// barrier_op_type lets pure acquire/release ownership-transfer barriers skip the
// stage-mask check on the side that does not execute on this queue.
// Returns true if a validation error was logged.
bool ValidateStageMasksAgainstQueueCapabilities(layer_data *dev_data, GLOBAL_CB_NODE const *cb_state,
                                                VkPipelineStageFlags source_stage_mask, VkPipelineStageFlags dest_stage_mask,
                                                BarrierOperationsType barrier_op_type, const char *function,
                                                std::string error_code) {
    bool skip = false;
    uint32_t queue_family_index = dev_data->commandPoolMap[cb_state->createInfo.commandPool].queueFamilyIndex;
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(dev_data->physical_device), instance_layer_data_map);
    auto physical_device_state = GetPhysicalDeviceState(instance_data, dev_data->physical_device);

    // Any pipeline stage included in srcStageMask or dstStageMask must be supported by the capabilities of the queue family
    // specified by the queueFamilyIndex member of the VkCommandPoolCreateInfo structure that was used to create the VkCommandPool
    // that commandBuffer was allocated from, as specified in the table of supported pipeline stages.

    if (queue_family_index < physical_device_state->queue_family_properties.size()) {
        VkQueueFlags specified_queue_flags = physical_device_state->queue_family_properties[queue_family_index].queueFlags;

        // Only check the source stage mask if any barriers aren't "acquire ownership"
        // (ALL_COMMANDS is always valid, so its presence also skips the check).
        if ((barrier_op_type != kAllAcquire) && (source_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
            skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, source_stage_mask, specified_queue_flags,
                                                     function, "srcStageMask", error_code);
        }
        // Only check the dest stage mask if any barriers aren't "release ownership"
        if ((barrier_op_type != kAllRelease) && (dest_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
            skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, dest_stage_mask, specified_queue_flags,
                                                     function, "dstStageMask", error_code);
        }
    }
    return skip;
}
// Aggregate validation for vkCmdWaitEvents: queue capabilities vs stage masks,
// geometry/tessellation feature gating of stage bits, queue-flags/command checks,
// and full barrier validation. Returns true if any check logged an error.
static bool PreCallValidateCmdEventCount(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineStageFlags sourceStageMask,
                                         VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount,
                                         const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount,
                                         const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount,
                                         const VkImageMemoryBarrier *pImageMemoryBarriers) {
    // Pure ownership-transfer barrier sets relax the stage-mask/queue checks.
    auto barrier_op_type = ComputeBarrierOperationsType(dev_data, cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers,
                                                        imageMemoryBarrierCount, pImageMemoryBarriers);
    bool skip = ValidateStageMasksAgainstQueueCapabilities(dev_data, cb_state, sourceStageMask, dstStageMask, barrier_op_type,
                                                           "vkCmdWaitEvents", "VUID-vkCmdWaitEvents-srcStageMask-01164");
    skip |= ValidateStageMaskGsTsEnables(dev_data, sourceStageMask, "vkCmdWaitEvents()",
                                         "VUID-vkCmdWaitEvents-srcStageMask-01159",
                                         "VUID-vkCmdWaitEvents-srcStageMask-01161",
                                         "VUID-vkCmdWaitEvents-srcStageMask-02111",
                                         "VUID-vkCmdWaitEvents-srcStageMask-02112");
    skip |= ValidateStageMaskGsTsEnables(dev_data, dstStageMask, "vkCmdWaitEvents()",
                                         "VUID-vkCmdWaitEvents-dstStageMask-01160",
                                         "VUID-vkCmdWaitEvents-dstStageMask-01162",
                                         "VUID-vkCmdWaitEvents-dstStageMask-02113",
                                         "VUID-vkCmdWaitEvents-dstStageMask-02114");
    skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdWaitEvents()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdWaitEvents-commandBuffer-cmdpool");
    skip |= ValidateCmd(dev_data, cb_state, CMD_WAITEVENTS, "vkCmdWaitEvents()");
    skip |= ValidateBarriersToImages(dev_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdWaitEvents()");
    skip |= ValidateBarriers(dev_data, "vkCmdWaitEvents()", cb_state, sourceStageMask, dstStageMask, memoryBarrierCount,
                             pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
                             pImageMemoryBarriers);
    return skip;
}
// State recording for vkCmdWaitEvents: bind the events to the command buffer,
// queue a submit-time stage-mask check, and apply image layout transitions.
static void PreCallRecordCmdWaitEvents(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, uint32_t eventCount, const VkEvent *pEvents,
                                       VkPipelineStageFlags sourceStageMask, uint32_t imageMemoryBarrierCount,
                                       const VkImageMemoryBarrier *pImageMemoryBarriers) {
    // Remember where this call's events start within the CB's event list; the
    // deferred lambda below validates exactly this slice at submit time.
    auto first_event_index = cb_state->events.size();
    for (uint32_t i = 0; i < eventCount; ++i) {
        auto event_state = GetEventNode(dev_data, pEvents[i]);
        if (event_state) {
            AddCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(pEvents[i]), kVulkanObjectTypeEvent}, cb_state);
            event_state->cb_bindings.insert(cb_state);
        }
        cb_state->waitedEvents.insert(pEvents[i]);
        cb_state->events.push_back(pEvents[i]);
    }
    // Captures by value (including the cb_state pointer); runs at queue submit.
    cb_state->eventUpdates.emplace_back(
        [=](VkQueue q) { return ValidateEventStageMask(q, cb_state, eventCount, first_event_index, sourceStageMask); });
    TransitionImageLayouts(dev_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers);
}
// Record any queue-family-ownership (QFO) transfer barriers carried by this
// vkCmdWaitEvents call so that matching acquire/release pairs can be validated.
static void PostCallRecordCmdWaitEvents(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, uint32_t bufferMemoryBarrierCount,
                                        const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount,
                                        const VkImageMemoryBarrier *pImageMemoryBarriers) {
    RecordBarriersQFOTransfers(dev_data, "vkCmdWaitEvents()", cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers,
                               imageMemoryBarrierCount, pImageMemoryBarriers);
}
// Layer entry point for vkCmdWaitEvents: validate under the global lock, record
// state, forward to the driver when validation passed, then record QFO transfers.
VKAPI_ATTR void VKAPI_CALL CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
                                         VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
                                         uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                         uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                         uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
    if (cb_state) {
        skip |= PreCallValidateCmdEventCount(dev_data, cb_state, sourceStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers,
                                             bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
                                             pImageMemoryBarriers);
        if (!skip) {
            PreCallRecordCmdWaitEvents(dev_data, cb_state, eventCount, pEvents, sourceStageMask, imageMemoryBarrierCount,
                                       pImageMemoryBarriers);
        }
    }
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
                                               memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
                                               imageMemoryBarrierCount, pImageMemoryBarriers);
    }
    lock.lock();
    // Bug fix: the post-call record was previously unconditional, dereferencing a
    // null cb_state for unknown command buffers and recording QFO transfers for
    // calls that were skipped (never forwarded to the driver). Only record when
    // the command buffer is known and the command actually executed.
    if (cb_state && !skip) {
        PostCallRecordCmdWaitEvents(dev_data, cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
                                    pImageMemoryBarriers);
    }
}
// Aggregate validation for vkCmdPipelineBarrier. Mirrors PreCallValidateCmdEventCount
// but adds render-pass-specific barrier checks when inside a render pass.
// Returns true if any check logged an error.
static bool PreCallValidateCmdPipelineBarrier(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkPipelineStageFlags srcStageMask,
                                              VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
                                              uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    bool skip = false;
    // Pure ownership-transfer barrier sets relax the stage-mask/queue checks.
    auto barrier_op_type = ComputeBarrierOperationsType(device_data, cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers,
                                                        imageMemoryBarrierCount, pImageMemoryBarriers);
    skip |= ValidateStageMasksAgainstQueueCapabilities(device_data, cb_state, srcStageMask, dstStageMask, barrier_op_type,
                                                       "vkCmdPipelineBarrier", "VUID-vkCmdPipelineBarrier-srcStageMask-01183");
    skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdPipelineBarrier()",
                                  VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdPipelineBarrier-commandBuffer-cmdpool");
    skip |= ValidateCmd(device_data, cb_state, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
    skip |= ValidateStageMaskGsTsEnables(device_data, srcStageMask, "vkCmdPipelineBarrier()",
                                         "VUID-vkCmdPipelineBarrier-srcStageMask-01168",
                                         "VUID-vkCmdPipelineBarrier-srcStageMask-01170",
                                         "VUID-vkCmdPipelineBarrier-srcStageMask-02115",
                                         "VUID-vkCmdPipelineBarrier-srcStageMask-02116");
    skip |= ValidateStageMaskGsTsEnables(device_data, dstStageMask, "vkCmdPipelineBarrier()",
                                         "VUID-vkCmdPipelineBarrier-dstStageMask-01169",
                                         "VUID-vkCmdPipelineBarrier-dstStageMask-01171",
                                         "VUID-vkCmdPipelineBarrier-dstStageMask-02117",
                                         "VUID-vkCmdPipelineBarrier-dstStageMask-02118");
    if (cb_state->activeRenderPass) {
        skip |= ValidateRenderPassPipelineBarriers(device_data, "vkCmdPipelineBarrier()", cb_state, srcStageMask, dstStageMask,
                                                   dependencyFlags, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                                   pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
        if (skip) return true;  // Early return to avoid redundant errors from below calls
    }
    skip |=
        ValidateBarriersToImages(device_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdPipelineBarrier()");
    skip |= ValidateBarriers(device_data, "vkCmdPipelineBarrier()", cb_state, srcStageMask, dstStageMask, memoryBarrierCount,
                             pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
                             pImageMemoryBarriers);
    return skip;
}
// State recording for vkCmdPipelineBarrier: log QFO transfer barriers and apply
// image layout transitions to tracked state.
// NOTE(review): the commandBuffer parameter is unused in this body — cb_state
// already identifies the command buffer.
static void PreCallRecordCmdPipelineBarrier(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer,
                                            uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                            uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    RecordBarriersQFOTransfers(device_data, "vkCmdPipelineBarrier()", cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers,
                               imageMemoryBarrierCount, pImageMemoryBarriers);
    TransitionImageLayouts(device_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers);
}
// Layer entry point for vkCmdPipelineBarrier: validate and record under the global
// lock, then forward to the driver only when validation passed.
VKAPI_ATTR void VKAPI_CALL CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
                                              VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
                                              uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    bool skip = false;
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
    if (cb_state) {
        skip |= PreCallValidateCmdPipelineBarrier(device_data, cb_state, srcStageMask, dstStageMask, dependencyFlags,
                                                  memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                                  pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
        if (!skip) {
            // Record only when the command will actually be forwarded.
            PreCallRecordCmdPipelineBarrier(device_data, cb_state, commandBuffer, bufferMemoryBarrierCount, pBufferMemoryBarriers,
                                            imageMemoryBarrierCount, pImageMemoryBarriers);
        }
    } else {
        // An unknown command buffer handle indicates internal tracking failure.
        assert(0);
    }
    lock.unlock();
    if (!skip) {
        device_data->dispatch_table.CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
                                                       memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                                       pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    }
}
// Mark a query's availability state on both the command buffer and the queue's
// tracking maps. Always returns false (it never signals a validation error);
// the bool return lets it be used as a deferred queryUpdates callback.
static bool SetQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_node = GetCBNode(dev_data, commandBuffer);
    if (cb_node != nullptr) {
        cb_node->queryToStateMap[object] = value;
    }
    auto queue_iter = dev_data->queueMap.find(queue);
    if (queue_iter != dev_data->queueMap.end()) {
        queue_iter->second.queryToStateMap[object] = value;
    }
    return false;
}
// Validation for vkCmdBeginQuery: the command buffer must come from a graphics
// or compute queue family and be in a recordable state.
// Returns true if a validation error was logged.
static bool PreCallValidateCmdBeginQuery(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skip = false;
    skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdBeginQuery()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdBeginQuery-commandBuffer-cmdpool");
    skip |= ValidateCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
    return skip;
}
// State recording for vkCmdBeginQuery: track the query as active and started,
// and bind the query pool to the command buffer.
static void PostCallRecordCmdBeginQuery(layer_data *dev_data, VkQueryPool queryPool, uint32_t slot, GLOBAL_CB_NODE *pCB) {
    QueryObject query = {queryPool, slot};
    pCB->activeQueries.insert(query);
    pCB->startedQueries.insert(query);
    AddCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
                            {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, pCB);
}
// Layer entry point for vkCmdBeginQuery: validate, forward, then record state.
VKAPI_ATTR void VKAPI_CALL CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        // Bug fix: the validation result was previously discarded, leaving `skip`
        // permanently false so invalid calls were always forwarded to the driver.
        skip |= PreCallValidateCmdBeginQuery(dev_data, pCB);
    }
    lock.unlock();
    if (skip) return;
    dev_data->dispatch_table.CmdBeginQuery(commandBuffer, queryPool, slot, flags);
    lock.lock();
    if (pCB) {
        PostCallRecordCmdBeginQuery(dev_data, queryPool, slot, pCB);
    }
}
// Validation for vkCmdEndQuery: the query must currently be active on this command
// buffer, and the command buffer must support graphics or compute commands.
// Returns true if a validation error was logged.
static bool PreCallValidateCmdEndQuery(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const QueryObject &query,
                                       VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
    bool skip = false;
    if (!cb_state->activeQueries.count(query)) {
        skip |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    HandleToUint64(commandBuffer), "VUID-vkCmdEndQuery-None-01923",
                    "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d.", HandleToUint64(queryPool), slot);
    }
    // Fix: report the entry point with its actual name "vkCmdEndQuery()" (the
    // previous "VkCmdEndQuery()" casing was inconsistent with sibling validators).
    skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdEndQuery()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdEndQuery-commandBuffer-cmdpool");
    skip |= ValidateCmd(dev_data, cb_state, CMD_ENDQUERY, "vkCmdEndQuery()");
    return skip;
}
// State recording for vkCmdEndQuery: deactivate the query, queue a submit-time
// update marking it available, and bind the query pool to the command buffer.
static void PostCallRecordCmdEndQuery(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const QueryObject &query,
                                      VkCommandBuffer commandBuffer, VkQueryPool queryPool) {
    cb_state->activeQueries.erase(query);
    // Deferred: marks the query "available" when the command buffer is submitted.
    cb_state->queryUpdates.emplace_back([=](VkQueue q) { return SetQueryState(q, commandBuffer, query, true); });
    AddCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
                            {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_state);
}
// Layer entry point for vkCmdEndQuery: validate, forward, then record state.
VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    QueryObject query = {queryPool, slot};
    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
    if (cb_state) {
        skip |= PreCallValidateCmdEndQuery(dev_data, cb_state, query, commandBuffer, queryPool, slot);
    }
    lock.unlock();
    if (skip) return;
    dev_data->dispatch_table.CmdEndQuery(commandBuffer, queryPool, slot);
    lock.lock();
    if (cb_state) {
        PostCallRecordCmdEndQuery(dev_data, cb_state, query, commandBuffer, queryPool);
    }
}
// Validation for vkCmdResetQueryPool: must be outside a render pass, on a
// graphics- or compute-capable command buffer, in a recordable state.
// Returns true if a validation error was logged.
static bool PreCallValidateCmdResetQueryPool(layer_data *dev_data, GLOBAL_CB_NODE *cb_state) {
    bool skip = InsideRenderPass(dev_data, cb_state, "vkCmdResetQueryPool()", "VUID-vkCmdResetQueryPool-renderpass");
    // Fix: report the entry point with its actual name "vkCmdResetQueryPool()"
    // (the previous "VkCmdResetQueryPool()" casing was inconsistent with the
    // other messages in this file).
    skip |= ValidateCmd(dev_data, cb_state, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
    skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdResetQueryPool()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdResetQueryPool-commandBuffer-cmdpool");
    return skip;
}
// State recording for vkCmdResetQueryPool: snapshot the waited-events set for each
// reset query, queue submit-time updates marking them unavailable, and bind the
// query pool to the command buffer.
static void PostCallRecordCmdResetQueryPool(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer,
                                            VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
    for (uint32_t i = 0; i < queryCount; i++) {
        QueryObject query = {queryPool, firstQuery + i};
        cb_state->waitedEventsBeforeQueryReset[query] = cb_state->waitedEvents;
        // Deferred: marks the query "unavailable" at submit time.
        cb_state->queryUpdates.emplace_back([=](VkQueue q) { return SetQueryState(q, commandBuffer, query, false); });
    }
    AddCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
                            {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_state);
}
// Layer entry point for vkCmdResetQueryPool: validate, forward, then record state.
VKAPI_ATTR void VKAPI_CALL CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
                                             uint32_t queryCount) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
    // Bug fix: cb_state was previously passed to the validate/record helpers with
    // no null check, unlike every sibling entry point; both helpers dereference
    // it, so an unknown command buffer handle crashed the layer.
    bool skip = false;
    if (cb_state) {
        skip |= PreCallValidateCmdResetQueryPool(dev_data, cb_state);
    }
    lock.unlock();
    if (skip) return;
    dev_data->dispatch_table.CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
    lock.lock();
    if (cb_state) {
        PostCallRecordCmdResetQueryPool(dev_data, cb_state, commandBuffer, queryPool, firstQuery, queryCount);
    }
}
// A query is "invalid" (unavailable) if the per-queue state map, or failing that
// the device-wide state map, records it as not-set or has no entry for it.
static bool IsQueryInvalid(layer_data *dev_data, QUEUE_STATE *queue_data, VkQueryPool queryPool, uint32_t queryIndex) {
    const QueryObject query = {queryPool, queryIndex};
    // Per-queue state takes precedence when present.
    auto per_queue = queue_data->queryToStateMap.find(query);
    if (per_queue != queue_data->queryToStateMap.end()) {
        return !per_queue->second;
    }
    // Fall back to the device-wide state; an unknown query counts as invalid.
    auto device_wide = dev_data->queryToStateMap.find(query);
    return (device_wide == dev_data->queryToStateMap.end()) || !device_wide->second;
}
// Deferred (queue-submit-time) check, registered by vkCmdCopyQueryPoolResults, that
// every query in [firstQuery, firstQuery + queryCount) is in a valid (available)
// state before its results are copied. Returns true if an error was logged.
static bool ValidateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
    auto queue_data = GetQueueState(dev_data, queue);
    // Without queue state there is nothing to check against.
    if (!queue_data) return false;
    for (uint32_t i = 0; i < queryCount; i++) {
        if (IsQueryInvalid(dev_data, queue_data, queryPool, firstQuery + i)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidQuery,
                            "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
                            HandleToUint64(queryPool), firstQuery + i);
        }
    }
    return skip;
}
// Validation for vkCmdCopyQueryPoolResults: destination buffer must be memory-bound
// with TRANSFER_DST usage; the call must be outside a render pass on a graphics- or
// compute-capable command buffer. Returns true if a validation error was logged.
static bool PreCallValidateCmdCopyQueryPoolResults(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, BUFFER_STATE *dst_buff_state) {
    bool skip = ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdCopyQueryPoolResults()",
                                              "VUID-vkCmdCopyQueryPoolResults-dstBuffer-00826");
    // Validate that DST buffer has correct usage flags set
    skip |= ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                     "VUID-vkCmdCopyQueryPoolResults-dstBuffer-00825", "vkCmdCopyQueryPoolResults()",
                                     "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
    skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdCopyQueryPoolResults()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdCopyQueryPoolResults-commandBuffer-cmdpool");
    skip |= ValidateCmd(dev_data, cb_state, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
    skip |= InsideRenderPass(dev_data, cb_state, "vkCmdCopyQueryPoolResults()", "VUID-vkCmdCopyQueryPoolResults-renderpass");
    return skip;
}
// State recording for vkCmdCopyQueryPoolResults: bind the destination buffer and
// query pool to the command buffer, and queue a submit-time query-validity check.
static void PostCallRecordCmdCopyQueryPoolResults(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, BUFFER_STATE *dst_buff_state,
                                                  VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
    AddCommandBufferBindingBuffer(dev_data, cb_state, dst_buff_state);
    // Deferred: validates the copied queries are available at submit time.
    cb_state->queryUpdates.emplace_back([=](VkQueue q) { return ValidateQuery(q, cb_state, queryPool, firstQuery, queryCount); });
    AddCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
                            {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_state);
}
// Layer entry point for vkCmdCopyQueryPoolResults: validate, forward, then record.
// Both the command buffer and destination buffer must be known to the layer for
// validation/recording to occur; the call is still forwarded otherwise.
VKAPI_ATTR void VKAPI_CALL CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
                                                   uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                                                   VkDeviceSize stride, VkQueryResultFlags flags) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    auto cb_node = GetCBNode(dev_data, commandBuffer);
    auto dst_buff_state = GetBufferState(dev_data, dstBuffer);
    if (cb_node && dst_buff_state) {
        skip |= PreCallValidateCmdCopyQueryPoolResults(dev_data, cb_node, dst_buff_state);
    }
    lock.unlock();
    if (skip) return;
    dev_data->dispatch_table.CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset, stride,
                                                     flags);
    lock.lock();
    if (cb_node && dst_buff_state) {
        PostCallRecordCmdCopyQueryPoolResults(dev_data, cb_node, dst_buff_state, queryPool, firstQuery, queryCount);
    }
}
// Validation for vkCmdPushConstants: basic command/queue checks, range sanity, and
// the two-way stage-flag consistency requirement against the pipeline layout's
// VkPushConstantRange list. Returns true if a validation error was logged.
static bool PreCallValidateCmdPushConstants(layer_data *dev_data, VkCommandBuffer commandBuffer, VkPipelineLayout layout,
                                            VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size) {
    bool skip = false;
    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
    if (cb_state) {
        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdPushConstants()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      "VUID-vkCmdPushConstants-commandBuffer-cmdpool");
        skip |= ValidateCmd(dev_data, cb_state, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
    }
    skip |= ValidatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
    if (0 == stageFlags) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdPushConstants-stageFlags-requiredbitmask",
                        "vkCmdPushConstants() call has no stageFlags set.");
    }

    // Check if pipeline_layout VkPushConstantRange(s) overlapping offset, size have stageFlags set for each stage in the command
    // stageFlags argument, *and* that the command stageFlags argument has bits set for the stageFlags in each overlapping range.
    if (!skip) {
        const auto &ranges = *GetPipelineLayout(dev_data, layout)->push_constant_ranges;
        VkShaderStageFlags found_stages = 0;
        for (const auto &range : ranges) {
            // A range "overlaps" when it fully contains [offset, offset + size).
            if ((offset >= range.offset) && (offset + size <= range.offset + range.size)) {
                VkShaderStageFlags matching_stages = range.stageFlags & stageFlags;
                if (matching_stages != range.stageFlags) {
                    // "VUID-vkCmdPushConstants-offset-01796" VUID-vkCmdPushConstants-offset-01796
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer),
                                    "VUID-vkCmdPushConstants-offset-01796",
                                    "vkCmdPushConstants(): stageFlags (0x%" PRIx32 ", offset (%" PRIu32 "), and size (%" PRIu32
                                    "), "
                                    "must contain all stages in overlapping VkPushConstantRange stageFlags (0x%" PRIx32
                                    "), offset (%" PRIu32 "), and size (%" PRIu32 ") in pipeline layout 0x%" PRIx64 ".",
                                    (uint32_t)stageFlags, offset, size, (uint32_t)range.stageFlags, range.offset, range.size,
                                    HandleToUint64(layout));
                }

                // Accumulate all stages we've found
                found_stages = matching_stages | found_stages;
            }
        }
        if (found_stages != stageFlags) {
            // "VUID-vkCmdPushConstants-offset-01795" VUID-vkCmdPushConstants-offset-01795
            uint32_t missing_stages = ~found_stages & stageFlags;
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), "VUID-vkCmdPushConstants-offset-01795",
                            "vkCmdPushConstants(): stageFlags = 0x%" PRIx32 ", VkPushConstantRange in pipeline layout 0x%" PRIx64
                            " overlapping offset = %d and size = %d, do not contain stageFlags 0x%" PRIx32 ".",
                            (uint32_t)stageFlags, HandleToUint64(layout), offset, size, missing_stages);
        }
    }
    return skip;
}
// Layer entry point for vkCmdPushConstants: validate under the global lock, then
// forward to the driver only when validation passed. No state recording is needed.
VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags,
                                            uint32_t offset, uint32_t size, const void *pValues) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = false;
    {
        // Scoped lock: released before dispatching down the chain.
        unique_lock_t lock(global_lock);
        skip = PreCallValidateCmdPushConstants(dev_data, commandBuffer, layout, stageFlags, offset, size);
    }
    if (skip) return;
    dev_data->dispatch_table.CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
}
// Validation for vkCmdWriteTimestamp: allowed on graphics, compute, or transfer
// queues; the command buffer must be in a recordable state.
// Returns true if a validation error was logged.
static bool PreCallValidateCmdWriteTimestamp(layer_data *dev_data, GLOBAL_CB_NODE *cb_state) {
    bool skip = false;
    skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdWriteTimestamp()",
                                  VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
                                  "VUID-vkCmdWriteTimestamp-commandBuffer-cmdpool");
    skip |= ValidateCmd(dev_data, cb_state, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
    return skip;
}
// State recording for vkCmdWriteTimestamp: queue a submit-time update that marks
// the written query slot as available.
static void PostCallRecordCmdWriteTimestamp(GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer, VkQueryPool queryPool,
                                            uint32_t slot) {
    QueryObject query = {queryPool, slot};
    cb_state->queryUpdates.emplace_back([=](VkQueue q) { return SetQueryState(q, commandBuffer, query, true); });
}
// Layer entry point for vkCmdWriteTimestamp: validate, forward, then record state.
VKAPI_ATTR void VKAPI_CALL CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
                                             VkQueryPool queryPool, uint32_t slot) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
    if (cb_state) {
        skip |= PreCallValidateCmdWriteTimestamp(dev_data, cb_state);
    }
    lock.unlock();
    if (skip) return;
    dev_data->dispatch_table.CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
    lock.lock();
    if (cb_state) PostCallRecordCmdWriteTimestamp(cb_state, commandBuffer, queryPool, slot);
}
// Helper for vkCreateFramebuffer validation: verify that every used attachment's
// backing image was created with the given usage flag. Returns true if a
// validation error was logged.
static bool MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference *attachments,
                       const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag, std::string error_code) {
    bool skip = false;

    for (uint32_t attach = 0; attach < count; attach++) {
        if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
            // Attachment counts are verified elsewhere, but prevent an invalid access
            if (attachments[attach].attachment < fbci->attachmentCount) {
                const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
                auto view_state = GetImageViewState(dev_data, *image_view);
                if (view_state) {
                    // Bug fix: the image state was previously dereferenced
                    // (GetImageState(...)->createInfo) before any null check; the
                    // old `ici != nullptr` test was dead code (address-of a member
                    // is never null) and masked the real hazard when the view's
                    // image is unknown to the layer.
                    auto image_state = GetImageState(dev_data, view_state->create_info.image);
                    if (image_state) {
                        const VkImageCreateInfo *ici = &image_state->createInfo;
                        if ((ici->usage & usage_flag) == 0) {
                            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, error_code,
                                            "vkCreateFramebuffer:  Framebuffer Attachment (%d) conflicts with the image's "
                                            "IMAGE_USAGE flags (%s).",
                                            attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
                        }
                    }
                }
            }
        }
    }
    return skip;
}
// Validate VkFramebufferCreateInfo which includes:
// 1. attachmentCount equals renderPass attachmentCount
// 2. corresponding framebuffer and renderpass attachments have matching formats
// 3. corresponding framebuffer and renderpass attachments have matching sample counts
// 4. fb attachments only have a single mip level
// 5. fb attachment dimensions are each at least as large as the fb
// 6. fb attachments use identity swizzle
// 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
// 8. fb dimensions are within physical device limits
// Validate |pCreateInfo| for vkCreateFramebuffer per the checklist in the comment
// above. Returns true if validation failed and the call down the chain should be
// skipped.
static bool ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
    bool skip = false;
    auto rp_state = GetRenderPassState(dev_data, pCreateInfo->renderPass);
    if (rp_state) {
        const VkRenderPassCreateInfo *rpci = rp_state->createInfo.ptr();
        if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            HandleToUint64(pCreateInfo->renderPass), "VUID-VkFramebufferCreateInfo-attachmentCount-00876",
                            "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount "
                            "of %u of renderPass (0x%" PRIx64 ") being used to create Framebuffer.",
                            pCreateInfo->attachmentCount, rpci->attachmentCount, HandleToUint64(pCreateInfo->renderPass));
        } else {
            // attachmentCounts match, so make sure corresponding attachment details line up
            const VkImageView *image_views = pCreateInfo->pAttachments;
            for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
                auto view_state = GetImageViewState(dev_data, image_views[i]);
                if (!view_state) {
                    // Bug fix: an unknown (e.g. destroyed or otherwise invalid) image view has no
                    // tracked state; skip it instead of dereferencing a null pointer below. The
                    // invalid handle itself is reported by other validation (cf. MatchUsage, which
                    // already guards this case).
                    continue;
                }
                auto &ivci = view_state->create_info;
                if (ivci.format != rpci->pAttachments[i].format) {
                    skip |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                                HandleToUint64(pCreateInfo->renderPass), "VUID-VkFramebufferCreateInfo-pAttachments-00880",
                                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not "
                                "match the format of %s used by the corresponding attachment for renderPass (0x%" PRIx64 ").",
                                i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
                                HandleToUint64(pCreateInfo->renderPass));
                }
                // NOTE(review): GetImageState() is assumed non-null for a view with tracked
                // state; the subsequent ici != nullptr checks elsewhere suggest this can be
                // violated — confirm against image-view lifetime tracking.
                const VkImageCreateInfo *ici = &GetImageState(dev_data, ivci.image)->createInfo;
                if (ici->samples != rpci->pAttachments[i].samples) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                        HandleToUint64(pCreateInfo->renderPass), "VUID-VkFramebufferCreateInfo-pAttachments-00881",
                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match the %s "
                        "samples used by the corresponding attachment for renderPass (0x%" PRIx64 ").",
                        i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
                        HandleToUint64(pCreateInfo->renderPass));
                }
                // Verify that view only has a single mip level
                if (ivci.subresourceRange.levelCount != 1) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                                    0, "VUID-VkFramebufferCreateInfo-pAttachments-00883",
                                    "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u but "
                                    "only a single mip level (levelCount == 1) is allowed when creating a Framebuffer.",
                                    i, ivci.subresourceRange.levelCount);
                }
                // Each attachment's selected mip level must be at least as large as the framebuffer.
                const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
                uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
                uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
                if ((ivci.subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
                    (mip_height < pCreateInfo->height)) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                                    0, "VUID-VkFramebufferCreateInfo-pAttachments-00882",
                                    "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions "
                                    "smaller than the corresponding framebuffer dimensions. Here are the respective dimensions for "
                                    "attachment #%u, framebuffer:\n"
                                    "width: %u, %u\n"
                                    "height: %u, %u\n"
                                    "layerCount: %u, %u\n",
                                    i, ivci.subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height,
                                    pCreateInfo->height, ivci.subresourceRange.layerCount, pCreateInfo->layers);
                }
                // Framebuffer attachments must use the identity swizzle.
                if (((ivci.components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.r != VK_COMPONENT_SWIZZLE_R)) ||
                    ((ivci.components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.g != VK_COMPONENT_SWIZZLE_G)) ||
                    ((ivci.components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.b != VK_COMPONENT_SWIZZLE_B)) ||
                    ((ivci.components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.a != VK_COMPONENT_SWIZZLE_A))) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                                    0, "VUID-VkFramebufferCreateInfo-pAttachments-00884",
                                    "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has non-identy swizzle. All "
                                    "framebuffer attachments must have been created with the identity swizzle. Here are the actual "
                                    "swizzle values:\n"
                                    "r swizzle = %s\n"
                                    "g swizzle = %s\n"
                                    "b swizzle = %s\n"
                                    "a swizzle = %s\n",
                                    i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
                                    string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a));
                }
            }
        }
        // Verify correct attachment usage flags
        for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
            // Verify input attachments:
            skip |=
                MatchUsage(dev_data, rpci->pSubpasses[subpass].inputAttachmentCount, rpci->pSubpasses[subpass].pInputAttachments,
                           pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-00879");
            // Verify color attachments:
            skip |=
                MatchUsage(dev_data, rpci->pSubpasses[subpass].colorAttachmentCount, rpci->pSubpasses[subpass].pColorAttachments,
                           pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-00877");
            // Verify depth/stencil attachments:
            if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
                skip |= MatchUsage(dev_data, 1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
                                   VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-00878");
            }
        }
    }
    // Verify FB dimensions are within physical device limits
    if (pCreateInfo->width > dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkFramebufferCreateInfo-width-00886",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width exceeds physical device limits. Requested "
                        "width: %u, device max: %u\n",
                        pCreateInfo->width, dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth);
    }
    if (pCreateInfo->height > dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkFramebufferCreateInfo-height-00888",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height exceeds physical device limits. Requested "
                        "height: %u, device max: %u\n",
                        pCreateInfo->height, dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight);
    }
    if (pCreateInfo->layers > dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkFramebufferCreateInfo-layers-00890",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers exceeds physical device limits. Requested "
                        "layers: %u, device max: %u\n",
                        pCreateInfo->layers, dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers);
    }
    // Verify FB dimensions are greater than zero
    if (pCreateInfo->width <= 0) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkFramebufferCreateInfo-width-00885",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width must be greater than zero.");
    }
    if (pCreateInfo->height <= 0) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkFramebufferCreateInfo-height-00887",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height must be greater than zero.");
    }
    if (pCreateInfo->layers <= 0) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkFramebufferCreateInfo-layers-00889",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers must be greater than zero.");
    }
    return skip;
}
// Validate VkFramebufferCreateInfo state prior to calling down chain to create Framebuffer object
// Return true if an error is encountered and callback returns true to skip call down chain
// false indicates that call down chain should proceed
static bool PreCallValidateCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
    // TODO : Verify that renderPass FB is created with is compatible with FB
    // Currently all framebuffer validation lives in ValidateFramebufferCreateInfo().
    return ValidateFramebufferCreateInfo(dev_data, pCreateInfo);
}
// CreateFramebuffer state has been validated and call down chain completed so record new framebuffer object
static void PostCallRecordCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo, VkFramebuffer fb) {
// Shadow create info and store in map
std::unique_ptr<FRAMEBUFFER_STATE> fb_state(
new FRAMEBUFFER_STATE(fb, pCreateInfo, GetRenderPassStateSharedPtr(dev_data, pCreateInfo->renderPass)));
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
VkImageView view = pCreateInfo->pAttachments[i];
auto view_state = GetImageViewState(dev_data, view);
if (!view_state) {
continue;
}
#ifdef FRAMEBUFFER_ATTACHMENT_STATE_CACHE
MT_FB_ATTACHMENT_INFO fb_info;
fb_info.view_state = view_state;
fb_info.image = view_state->create_info.image;
fb_state->attachments.push_back(fb_info);
#endif
}
dev_data->frameBufferMap[fb] = std::move(fb_state);
}
// Layer entry point for vkCreateFramebuffer: validate under the lock, dispatch
// unlocked, and on success record the new framebuffer state.
VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
                                                 const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    const bool skip = PreCallValidateCreateFramebuffer(dev_data, pCreateInfo);
    lock.unlock();
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    const VkResult result = dev_data->dispatch_table.CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
    if (result == VK_SUCCESS) {
        lock.lock();
        PostCallRecordCreateFramebuffer(dev_data, pCreateInfo, *pFramebuffer);
        lock.unlock();
    }
    return result;
}
// Depth-first search over the subpass DAG: returns true if a dependency path
// from |index| back to |dependent| exists. |processed_nodes| prevents revisits.
static bool FindDependency(const uint32_t index, const uint32_t dependent, const std::vector<DAGNode> &subpass_to_node,
                           std::unordered_set<uint32_t> &processed_nodes) {
    // If we have already checked this node we have not found a dependency path so return false.
    if (!processed_nodes.insert(index).second) return false;
    const DAGNode &node = subpass_to_node[index];
    // Direct dependency on |dependent|?
    if (std::find(node.prev.begin(), node.prev.end(), dependent) != node.prev.end()) return true;
    // Otherwise recurse through the predecessors.
    for (auto pred : node.prev) {
        if (FindDependency(pred, dependent, subpass_to_node, processed_nodes)) return true;
    }
    return false;
}
// For each subpass in |dependent_subpasses| that shares an attachment with
// |subpass|, verify an explicit or transitive dependency exists between the two;
// logs an error and returns false otherwise. |skip| accumulates callback results.
static bool CheckDependencyExists(const layer_data *dev_data, const uint32_t subpass,
                                  const std::vector<uint32_t> &dependent_subpasses, const std::vector<DAGNode> &subpass_to_node,
                                  bool &skip) {
    bool result = true;
    const DAGNode &node = subpass_to_node[subpass];
    // Loop through all subpasses that share the same attachment and make sure a dependency exists
    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
        const uint32_t other = dependent_subpasses[k];
        if (static_cast<uint32_t>(subpass) == other) continue;
        // Check for a specified dependency between the two nodes. If one exists we are done.
        const bool direct_prev = std::find(node.prev.begin(), node.prev.end(), other) != node.prev.end();
        const bool direct_next = std::find(node.next.begin(), node.next.end(), other) != node.next.end();
        if (direct_prev || direct_next) continue;
        // If no dependency exits an implicit dependency still might. If not, throw an error.
        std::unordered_set<uint32_t> processed_nodes;
        if (FindDependency(subpass, other, subpass_to_node, processed_nodes) ||
            FindDependency(other, subpass, subpass_to_node, processed_nodes)) {
            continue;
        }
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        kVUID_Core_DrawState_InvalidRenderpass,
                        "A dependency between subpasses %d and %d must exist but one is not specified.", subpass, other);
        result = false;
    }
    return result;
}
// Recursively determine whether |attachment| is written by subpass |index| or
// any of its predecessors. When a predecessor wrote it and this subpass (depth
// > 0) neither uses nor preserves it, an error is logged into |skip|.
static bool CheckPreserved(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
                           const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip) {
    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
    // If this node writes to the attachment return true as next nodes need to preserve the attachment.
    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
        if (subpass.pColorAttachments[j].attachment == attachment) return true;
    }
    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
        if (subpass.pInputAttachments[j].attachment == attachment) return true;
    }
    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED &&
        subpass.pDepthStencilAttachment->attachment == attachment) {
        return true;
    }
    // Loop through previous nodes and see if any of them write to the attachment.
    const DAGNode &node = subpass_to_node[index];
    bool written_earlier = false;
    for (auto pred : node.prev) {
        written_earlier |= CheckPreserved(dev_data, pCreateInfo, pred, attachment, subpass_to_node, depth + 1, skip);
    }
    // If the attachment was written to by a previous node than this node needs to preserve it.
    if (written_earlier && depth > 0) {
        bool preserved_here = false;
        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
            if (subpass.pPreserveAttachments[j] == attachment) {
                preserved_here = true;
                break;
            }
        }
        if (!preserved_here) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            kVUID_Core_DrawState_InvalidRenderpass,
                            "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
        }
    }
    return written_earlier;
}
// Returns true when the half-open ranges [offset1, offset1 + size1) and
// [offset2, offset2 + size2) share at least one element.
// Fix: the previous formulation missed two real-overlap cases — identical
// ranges and one range fully containing the other — causing aliasing between
// attachments to go undetected. Standard interval intersection ("each range
// starts before the other ends") covers every case; empty (size 0) ranges
// still never overlap anything.
template <class T>
bool IsRangeOverlapping(T offset1, T size1, T offset2, T size2) {
    return (offset1 < (offset2 + size2)) && (offset2 < (offset1 + size1));
}
// Two image subresource regions overlap only when both their mip-level ranges
// and their array-layer ranges overlap.
bool IsRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
    const bool mips_overlap =
        IsRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount);
    const bool layers_overlap =
        IsRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount);
    return mips_overlap && layers_overlap;
}
// Validate the dependency structure of |renderPass| against the attachments
// actually bound in |framebuffer|: (1) attachments that alias (same view, same
// image subresource, or overlapping memory binding) must set MAY_ALIAS; (2) any
// two subpasses sharing an attachment must be ordered by an explicit or
// transitive VkSubpassDependency; (3) attachments read after being written must
// be listed in intervening subpasses' preserve attachments.
// Returns true if any error callback requested skipping the call.
static bool ValidateDependencies(const layer_data *dev_data, FRAMEBUFFER_STATE const *framebuffer,
                                 RENDER_PASS_STATE const *renderPass) {
    bool skip = false;
    auto const pFramebufferInfo = framebuffer->createInfo.ptr();
    auto const pCreateInfo = renderPass->createInfo.ptr();
    auto const &subpass_to_node = renderPass->subpassToNode;
    // For each attachment index: which subpasses write it / read it, and which
    // other attachment indices alias it.
    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
    // Find overlapping attachments
    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
            VkImageView viewi = pFramebufferInfo->pAttachments[i];
            VkImageView viewj = pFramebufferInfo->pAttachments[j];
            // Same view handle: trivially aliased.
            if (viewi == viewj) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
                continue;
            }
            auto view_state_i = GetImageViewState(dev_data, viewi);
            auto view_state_j = GetImageViewState(dev_data, viewj);
            if (!view_state_i || !view_state_j) {
                continue;
            }
            // Distinct views of the same image with overlapping subresources.
            auto view_ci_i = view_state_i->create_info;
            auto view_ci_j = view_state_j->create_info;
            if (view_ci_i.image == view_ci_j.image && IsRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
                continue;
            }
            // Different images bound to overlapping ranges of the same memory.
            auto image_data_i = GetImageState(dev_data, view_ci_i.image);
            auto image_data_j = GetImageState(dev_data, view_ci_j.image);
            if (!image_data_i || !image_data_j) {
                continue;
            }
            if (image_data_i->binding.mem == image_data_j->binding.mem &&
                IsRangeOverlapping(image_data_i->binding.offset, image_data_i->binding.size, image_data_j->binding.offset,
                                   image_data_j->binding.size)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
            }
        }
    }
    // Every aliasing pair must have MAY_ALIAS set on both attachment descriptions.
    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
        uint32_t attachment = i;
        for (auto other_attachment : overlapping_attachments[i]) {
            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,
                                HandleToUint64(framebuffer->framebuffer), "VUID-VkRenderPassCreateInfo-attachment-00833",
                                "Attachment %d aliases attachment %d but doesn't set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
                                attachment, other_attachment);
            }
            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,
                                HandleToUint64(framebuffer->framebuffer), "VUID-VkRenderPassCreateInfo-attachment-00833",
                                "Attachment %d aliases attachment %d but doesn't set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
                                other_attachment, attachment);
            }
        }
    }
    // Find for each attachment the subpasses that use them.
    unordered_set<uint32_t> attachmentIndices;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        attachmentIndices.clear();
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED) continue;
            // A use of an attachment counts as a use of everything it aliases.
            input_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                input_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
        }
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment = subpass.pColorAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED) continue;
            output_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                output_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
            attachmentIndices.insert(attachment);
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
            output_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                output_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
            // attachmentIndices holds this subpass's color attachments, so this
            // catches one attachment used as both color and depth output.
            if (attachmentIndices.count(attachment)) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            kVUID_Core_DrawState_InvalidRenderpass,
                            "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i);
            }
        }
    }
    // If there is a dependency needed make sure one exists
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        // If the attachment is an input then all subpasses that output must have a dependency relationship
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED) continue;
            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
        }
        // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment = subpass.pColorAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED) continue;
            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip);
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip);
        }
    }
    // Loop through implicit dependencies, if this pass reads make sure the attachment is preserved for all passes after it was
    // written.
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            CheckPreserved(dev_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip);
        }
    }
    return skip;
}
// Build the subpass dependency DAG (prev/next edges per subpass) and collect
// self-dependencies from |pCreateInfo|; logs structural errors (both ends
// external, or a backward edge). Returns true if a callback requested a skip.
static bool CreatePassDAG(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo, RENDER_PASS_STATE *render_pass) {
    bool skip = false;
    auto &subpass_to_node = render_pass->subpassToNode;
    auto &self_dependencies = render_pass->self_dependencies;
    subpass_to_node.resize(pCreateInfo->subpassCount);
    self_dependencies.resize(pCreateInfo->subpassCount);
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        subpass_to_node[i].pass = i;
        self_dependencies[i].clear();
    }
    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
        const bool involves_external =
            (dependency.srcSubpass == VK_SUBPASS_EXTERNAL) || (dependency.dstSubpass == VK_SUBPASS_EXTERNAL);
        if (involves_external) {
            // External edges don't enter the DAG, but both ends external is invalid.
            if (dependency.srcSubpass == dependency.dstSubpass) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                kVUID_Core_DrawState_InvalidRenderpass, "The src and dest subpasses cannot both be external.");
            }
        } else if (dependency.srcSubpass > dependency.dstSubpass) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            kVUID_Core_DrawState_InvalidRenderpass,
                            "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
        } else if (dependency.srcSubpass == dependency.dstSubpass) {
            self_dependencies[dependency.srcSubpass].push_back(i);
        } else {
            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
        }
    }
    return skip;
}
// Record the new shader module. If SPIR-V validation failed we still track the
// handle, but with an empty placeholder so later checks don't inspect bad code.
static void PostCallRecordCreateShaderModule(layer_data *dev_data, bool spirv_valid, const VkShaderModuleCreateInfo *pCreateInfo,
                                             VkShaderModule *pShaderModule) {
    if (spirv_valid) {
        dev_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo, *pShaderModule));
    } else {
        dev_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module());
    }
}
// Layer entry point for vkCreateShaderModule: validate the SPIR-V, dispatch, and
// on success record module state under the global lock.
VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
                                                  const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool spirv_valid;
    const bool skip = PreCallValidateCreateShaderModule(dev_data, pCreateInfo, &spirv_valid);
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    const VkResult res = dev_data->dispatch_table.CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
    if (VK_SUCCESS == res) {
        lock_guard_t lock(global_lock);
        PostCallRecordCreateShaderModule(dev_data, spirv_valid, pCreateInfo, pShaderModule);
    }
    return res;
}
// An attachment reference must either be VK_ATTACHMENT_UNUSED or index into the
// render pass's attachment array; |type| names the reference kind for the log.
static bool ValidateAttachmentIndex(const layer_data *dev_data, uint32_t attachment, uint32_t attachment_count, const char *type) {
    bool skip = false;
    const bool valid_index = (attachment < attachment_count) || (attachment == VK_ATTACHMENT_UNUSED);
    if (!valid_index) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkRenderPassCreateInfo-attachment-00834",
                        "CreateRenderPass: %s attachment %d must be less than the total number of attachments %d.", type,
                        attachment, attachment_count);
    }
    return skip;
}
// True iff x is a (non-zero) power of two: exactly one bit set.
static bool IsPowerOfTwo(unsigned x) {
    if (x == 0) return false;
    return (x & (x - 1)) == 0;
}
// Bit flags recording every role an attachment plays within one subpass.
enum AttachmentType {
    ATTACHMENT_COLOR = 1,
    ATTACHMENT_DEPTH = 2,
    ATTACHMENT_INPUT = 4,
    ATTACHMENT_PRESERVE = 8,
    ATTACHMENT_RESOLVE = 16,
};
// Human-readable name for a single attachment-use bit; any combination of bits
// (or an unrecognized value) maps to "(multiple)".
char const *StringAttachmentType(uint8_t type) {
    if (type == ATTACHMENT_COLOR) return "color";
    if (type == ATTACHMENT_DEPTH) return "depth";
    if (type == ATTACHMENT_INPUT) return "input";
    if (type == ATTACHMENT_PRESERVE) return "preserve";
    if (type == ATTACHMENT_RESOLVE) return "resolve";
    return "(multiple)";
}
// Record that subpass |subpass| uses |attachment| in role |new_use| with layout
// |new_layout|, validating against the roles/layout already recorded for this
// attachment in the same subpass. The branch order matters: duplicate role,
// then conflicting role, then conflicting layout, else record the use.
// Returns true if an error callback requested skipping the call.
static bool AddAttachmentUse(const layer_data *dev_data, uint32_t subpass, std::vector<uint8_t> &attachment_uses,
                             std::vector<VkImageLayout> &attachment_layouts, uint32_t attachment, uint8_t new_use,
                             VkImageLayout new_layout) {
    if (attachment >= attachment_uses.size()) return false; /* out of range, but already reported */

    bool skip = false;
    auto &uses = attachment_uses[attachment];
    if (uses & new_use) {
        // Same attachment already used in this exact role within this subpass.
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        kVUID_Core_DrawState_InvalidRenderpass,
                        "vkCreateRenderPass(): subpass %u already uses attachment %u as a %s attachment.", subpass, attachment,
                        StringAttachmentType(new_use));
    } else if (uses & ~ATTACHMENT_INPUT || (uses && (new_use == ATTACHMENT_RESOLVE || new_use == ATTACHMENT_PRESERVE))) {
        /* Note: input attachments are assumed to be done first. */
        // Combining roles is only allowed when the prior role is INPUT and the
        // new role is not RESOLVE/PRESERVE; anything else is a conflict.
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkSubpassDescription-pPreserveAttachments-00854",
                        "vkCreateRenderPass(): subpass %u uses attachment %u as both %s and %s attachment.", subpass, attachment,
                        StringAttachmentType(uses), StringAttachmentType(new_use));
    } else if (uses && attachment_layouts[attachment] != new_layout) {
        // A second, compatible use must still agree on the image layout.
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkSubpassDescription-layout-00855",
                        "vkCreateRenderPass(): subpass %u uses attachment %u with conflicting layouts: input uses %s, but %s "
                        "attachment uses %s.",
                        subpass, attachment, string_VkImageLayout(attachment_layouts[attachment]), StringAttachmentType(new_use),
                        string_VkImageLayout(new_layout));
    } else {
        // First (or compatible) use: record the layout and add the role bit.
        attachment_layouts[attachment] = new_layout;
        uses |= new_use;
    }
    return skip;
}
static bool ValidateRenderpassAttachmentUsage(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo) {
bool skip = false;
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
std::vector<uint8_t> attachment_uses(pCreateInfo->attachmentCount);
std::vector<VkImageLayout> attachment_layouts(pCreateInfo->attachmentCount);
if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDescription-pipelineBindPoint-00844",
"vkCreateRenderPass(): Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS.", i);
}
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
auto const &attachment_ref = subpass.pInputAttachments[j];
if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
skip |= ValidateAttachmentIndex(dev_data, attachment_ref.attachment, pCreateInfo->attachmentCount, "Input");
skip |= AddAttachmentUse(dev_data, i, attachment_uses, attachment_layouts, attachment_ref.attachment,
ATTACHMENT_INPUT, attachment_ref.layout);
}
}
for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
uint32_t attachment = subpass.pPreserveAttachments[j];
if (attachment == VK_ATTACHMENT_UNUSED) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDescription-attachment-00853",
"vkCreateRenderPass(): Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED.", j);
} else {
skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Preserve");
skip |= AddAttachmentUse(dev_data, i, attachment_uses, attachment_layouts, attachment, ATTACHMENT_PRESERVE,
VkImageLayout(0) /* preserve doesn't have any layout */);
}
}
unsigned sample_count = 0;
bool subpass_performs_resolve = false;
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
if (subpass.pResolveAttachments) {
auto const &attachment_ref = subpass.pResolveAttachments[j];
if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
skip |= ValidateAttachmentIndex(dev_data, attachment_ref.attachment, pCreateInfo->attachmentCount, "Resolve");
skip |= AddAttachmentUse(dev_data, i, attachment_uses, attachment_layouts, attachment_ref.attachment,
ATTACHMENT_RESOLVE, attachment_ref.layout);
subpass_performs_resolve = true;
if (!skip && pCreateInfo->pAttachments[attachment_ref.attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
0, "VUID-VkSubpassDescription-pResolveAttachments-00849",
"vkCreateRenderPass(): Subpass %u requests multisample resolve into attachment %u, which must "
"have VK_SAMPLE_COUNT_1_BIT but has %s.",
i, attachment_ref.attachment,
string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment_ref.attachment].samples));
}
}
}
}
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
auto const &attachment_ref = subpass.pColorAttachments[j];
skip |= ValidateAttachmentIndex(dev_data, attachment_ref.attachment, pCreateInfo->attachmentCount, "Color");
if (!skip && attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
skip |= AddAttachmentUse(dev_data, i, attachment_uses, attachment_layouts, attachment_ref.attachment,
ATTACHMENT_COLOR, attachment_ref.layout);
sample_count |= (unsigned)pCreateInfo->pAttachments[attachment_ref.attachment].samples;
if (subpass_performs_resolve &&
pCreateInfo->pAttachments[attachment_ref.attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
0, "VUID-VkSubpassDescription-pResolveAttachments-00848",
"vkCreateRenderPass(): Subpass %u requests multisample resolve from attachment %u which has "
"VK_SAMPLE_COUNT_1_BIT.",
i, attachment_ref.attachment);
}
if (dev_data->extensions.vk_amd_mixed_attachment_samples && subpass.pDepthStencilAttachment &&
subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
const auto depth_stencil_sample_count =
pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].samples;
if (pCreateInfo->pAttachments[attachment_ref.attachment].samples > depth_stencil_sample_count) {
skip |= log_msg(
dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDescription-pColorAttachments-01506",
"vkCreateRenderPass(): Subpass %u pColorAttachments[%u] has %s which is larger than "
"depth/stencil attachment %s.",
i, j, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment_ref.attachment].samples),
string_VkSampleCountFlagBits(depth_stencil_sample_count));
}
}
}
if (!skip && subpass_performs_resolve && subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED) {
if (attachment_ref.attachment == VK_ATTACHMENT_UNUSED) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
0, "VUID-VkSubpassDescription-pResolveAttachments-00847",
"vkCreateRenderPass(): Subpass %u requests multisample resolve from attachment %u which has "
"attachment=VK_ATTACHMENT_UNUSED.",
i, attachment_ref.attachment);
} else {
const auto &color_desc = pCreateInfo->pAttachments[attachment_ref.attachment];
const auto &resolve_desc = pCreateInfo->pAttachments[subpass.pResolveAttachments[j].attachment];
if (color_desc.format != resolve_desc.format) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
0, "VUID-VkSubpassDescription-pResolveAttachments-00850",
"vkCreateRenderPass(): Subpass %u pColorAttachments[%u] resolves to an attachment with a "
"different format. color format: %u, resolve format: %u.",
i, j, color_desc.format, resolve_desc.format);
}
}
}
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
auto const &attachment_ref = *subpass.pDepthStencilAttachment;
skip |= ValidateAttachmentIndex(dev_data, attachment_ref.attachment, pCreateInfo->attachmentCount, "Depth stencil");
if (!skip && attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
skip |= AddAttachmentUse(dev_data, i, attachment_uses, attachment_layouts, attachment_ref.attachment,
ATTACHMENT_DEPTH, attachment_ref.layout);
sample_count |= (unsigned)pCreateInfo->pAttachments[attachment_ref.attachment].samples;
}
}
if (!dev_data->extensions.vk_amd_mixed_attachment_samples && sample_count && !IsPowerOfTwo(sample_count)) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkAttachmentDescription-samples-parameter",
"vkCreateRenderPass(): Subpass %u attempts to render to attachments with inconsistent sample counts.", i);
}
}
return skip;
}
// Record whether attachment |index|'s first use within the render pass is a read or a write.
// Only the first recorded use for an index is kept; later calls for the same index are no-ops.
static void MarkAttachmentFirstUse(RENDER_PASS_STATE *render_pass, uint32_t index, bool is_read) {
    if (index == VK_ATTACHMENT_UNUSED) {
        return;
    }
    auto &first_read = render_pass->attachment_first_read;
    if (first_read.count(index) == 0) {
        first_read[index] = is_read;
    }
}
// Validate a render pass create info before calling down the chain: attachment usage,
// subpass-dependency DAG construction, stage/access mask consistency of every dependency,
// and finally (only if everything above passed) the attachment layouts.
// Returns true if validation failed and the call should be skipped.
static bool PreCallValidateCreateRenderPass(const layer_data *dev_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                                            RENDER_PASS_STATE *render_pass) {
    bool skip = false;
    // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with
    // ValidateLayouts.
    skip |= ValidateRenderpassAttachmentUsage(dev_data, pCreateInfo);

    // The handle is not known yet; make sure the state object does not carry a stale one.
    render_pass->renderPass = VK_NULL_HANDLE;
    skip |= CreatePassDAG(dev_data, pCreateInfo, render_pass);

    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
        auto const &dep = pCreateInfo->pDependencies[i];
        // Geometry/tessellation pipeline stages require the corresponding device features.
        skip |= ValidateStageMaskGsTsEnables(dev_data, dep.srcStageMask, "vkCreateRenderPass()",
                                             "VUID-VkSubpassDependency-srcStageMask-00860",
                                             "VUID-VkSubpassDependency-srcStageMask-00862",
                                             "VUID-VkSubpassDependency-srcStageMask-02099",
                                             "VUID-VkSubpassDependency-srcStageMask-02100");
        skip |= ValidateStageMaskGsTsEnables(dev_data, dep.dstStageMask, "vkCreateRenderPass()",
                                             "VUID-VkSubpassDependency-dstStageMask-00861",
                                             "VUID-VkSubpassDependency-dstStageMask-00863",
                                             "VUID-VkSubpassDependency-dstStageMask-02101",
                                             "VUID-VkSubpassDependency-dstStageMask-02102");
        // Each access mask must be supported by at least one stage in the corresponding stage mask.
        if (!ValidateAccessMaskPipelineStage(dep.srcAccessMask, dep.srcStageMask)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            "VUID-VkSubpassDependency-srcAccessMask-00868",
                            "CreateRenderPass: pDependencies[%u].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", i,
                            dep.srcAccessMask, dep.srcStageMask);
        }
        if (!ValidateAccessMaskPipelineStage(dep.dstAccessMask, dep.dstStageMask)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            "VUID-VkSubpassDependency-dstAccessMask-00869",
                            "CreateRenderPass: pDependencies[%u].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", i,
                            dep.dstAccessMask, dep.dstStageMask);
        }
    }

    // Layout validation is only meaningful once the structural checks above have passed.
    if (!skip) {
        skip |= ValidateLayouts(dev_data, device, pCreateInfo);
    }
    return skip;
}
// Style note:
// Use of rvalue reference exceeds reccommended usage of rvalue refs in google style guide, but intentionally forces caller to move
// or copy. This is clearer than passing a pointer to shared_ptr and avoids the atomic increment/decrement of shared_ptr copy
// construction or assignment.
// Record state for a successfully created render pass: stash the handle, mark the first
// read/write use of each attachment across all subpasses, and take ownership of the state
// object in the device's render pass map.
static void PostCallRecordCreateRenderPass(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo,
                                           const VkRenderPass render_pass_handle,
                                           std::shared_ptr<RENDER_PASS_STATE> &&render_pass) {
    render_pass->renderPass = render_pass_handle;
    RENDER_PASS_STATE *rp = render_pass.get();
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            MarkAttachmentFirstUse(rp, subpass.pColorAttachments[j].attachment, false);
            // Resolve attachments (one per color attachment, when present) are considered written.
            if (subpass.pResolveAttachments) {
                MarkAttachmentFirstUse(rp, subpass.pResolveAttachments[j].attachment, false);
            }
        }
        if (subpass.pDepthStencilAttachment) {
            MarkAttachmentFirstUse(rp, subpass.pDepthStencilAttachment->attachment, false);
        }
        // Input attachments are the only reads.
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            MarkAttachmentFirstUse(rp, subpass.pInputAttachments[j].attachment, true);
        }
    }
    // Even though render_pass is an rvalue-ref parameter, still must move s.t. move assignment is invoked.
    dev_data->renderPassMap[render_pass_handle] = std::move(render_pass);
}
// vkCreateRenderPass entry point: validate under the global lock, call down the chain,
// then record layer state on success.
VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // If we fail, this will act like a unique_ptr and auto-cleanup, as we aren't saving it anywhere
    auto render_pass = std::make_shared<RENDER_PASS_STATE>(pCreateInfo);

    bool skip = false;
    {
        unique_lock_t lock(global_lock);
        skip = PreCallValidateCreateRenderPass(dev_data, device, pCreateInfo, render_pass.get());
    }
    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    const VkResult result = dev_data->dispatch_table.CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
    if (result == VK_SUCCESS) {
        unique_lock_t lock(global_lock);
        PostCallRecordCreateRenderPass(dev_data, pCreateInfo, *pRenderPass, std::move(render_pass));
    }
    return result;
}
// Report an error (using |error_code|) if |pCB| is not a primary command buffer;
// |cmd_name| may only be recorded into primaries. Returns true if an error was logged.
static bool ValidatePrimaryCommandBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, char const *cmd_name,
                                         std::string error_code) {
    if (pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
        return false;
    }
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                   HandleToUint64(pCB->commandBuffer), error_code, "Cannot execute command %s on a secondary command buffer.",
                   cmd_name);
}
// Verify the renderArea of a render pass begin lies entirely inside the bound framebuffer.
static bool VerifyRenderAreaBounds(const layer_data *dev_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
    const safe_VkFramebufferCreateInfo *pFramebufferInfo =
        &GetFramebufferState(dev_data, pRenderPassBegin->framebuffer)->createInfo;
    const auto &area = pRenderPassBegin->renderArea;
    // Offsets must be non-negative and offset+extent must stay within the framebuffer dimensions.
    const bool x_in_bounds = (area.offset.x >= 0) && ((area.offset.x + area.extent.width) <= pFramebufferInfo->width);
    const bool y_in_bounds = (area.offset.y >= 0) && ((area.offset.y + area.extent.height) <= pFramebufferInfo->height);
    bool skip = false;
    if (!x_in_bounds || !y_in_bounds) {
        skip |= static_cast<bool>(log_msg(
            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
            kVUID_Core_DrawState_InvalidRenderArea,
            "Cannot execute a render pass with renderArea not within the bound of the framebuffer. RenderArea: x %d, y %d, width "
            "%d, height %d. Framebuffer: width %d, height %d.",
            area.offset.x, area.offset.y, area.extent.width, area.extent.height, pFramebufferInfo->width,
            pFramebufferInfo->height));
    }
    return skip;
}
// Decide whether |op| is in effect for |format| given the color/depth op and the stencil op.
// For a stencil-only format only stencil_op counts; for a combined depth/stencil format both
// ops count; otherwise only color_depth_op counts.
// TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately.
template <typename T>
static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
    const bool color_depth_matches = (color_depth_op == op);
    const bool stencil_matches = (stencil_op == op);
    if (!color_depth_matches && !stencil_matches) {
        return false;
    }
    const bool check_color_depth_load_op = !FormatIsStencilOnly(format);
    const bool check_stencil_load_op = FormatIsDepthAndStencil(format) || !check_color_depth_load_op;
    return (check_color_depth_load_op && color_depth_matches) || (check_stencil_load_op && stencil_matches);
}
// Validate vkCmdBeginRenderPass: pClearValues coverage of LOAD_OP_CLEAR attachments, render
// area bounds, attachment layouts, framebuffer/render-pass compatibility, subpass dependencies,
// and the command buffer's recording state. Returns true if the call should be skipped.
//
// Fix: |framebuffer| may be NULL when the application passes an invalid/unknown framebuffer
// handle (GetFramebufferState returns nullptr in the caller). The original code dereferenced
// framebuffer->rp_state unconditionally, crashing instead of reporting; framebuffer-dependent
// checks are now guarded.
static bool PreCallValidateCmdBeginRenderPass(layer_data *dev_data, const RENDER_PASS_STATE *render_pass_state,
                                              GLOBAL_CB_NODE *cb_state, const FRAMEBUFFER_STATE *framebuffer,
                                              const VkRenderPassBeginInfo *pRenderPassBegin) {
    assert(cb_state);
    bool skip = false;
    if (render_pass_state) {
        uint32_t clear_op_size = 0;  // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR
        // Find the highest attachment index whose relevant aspect uses VK_ATTACHMENT_LOAD_OP_CLEAR.
        for (uint32_t i = 0; i < render_pass_state->createInfo.attachmentCount; ++i) {
            auto pAttachment = &render_pass_state->createInfo.pAttachments[i];
            if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp, pAttachment->stencilLoadOp,
                                                     VK_ATTACHMENT_LOAD_OP_CLEAR)) {
                clear_op_size = static_cast<uint32_t>(i) + 1;
            }
        }
        if (clear_op_size > pRenderPassBegin->clearValueCount) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            HandleToUint64(render_pass_state->renderPass), "VUID-VkRenderPassBeginInfo-clearValueCount-00902",
                            "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but there "
                            "must be at least %u entries in pClearValues array to account for the highest index attachment in "
                            "renderPass 0x%" PRIx64
                            " that uses VK_ATTACHMENT_LOAD_OP_CLEAR is %u. Note that the pClearValues array is indexed by "
                            "attachment number so even if some pClearValues entries between 0 and %u correspond to attachments "
                            "that aren't cleared they will be ignored.",
                            pRenderPassBegin->clearValueCount, clear_op_size, HandleToUint64(render_pass_state->renderPass),
                            clear_op_size, clear_op_size - 1);
        }
        skip |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
        skip |= VerifyFramebufferAndRenderPassLayouts(dev_data, cb_state, pRenderPassBegin,
                                                      GetFramebufferState(dev_data, pRenderPassBegin->framebuffer));
        // Guard against a NULL framebuffer state (invalid handle) before dereferencing it.
        if (framebuffer && (framebuffer->rp_state->renderPass != render_pass_state->renderPass)) {
            skip |= ValidateRenderPassCompatibility(dev_data, "render pass", render_pass_state, "framebuffer",
                                                    framebuffer->rp_state.get(), "vkCmdBeginRenderPass()",
                                                    "VUID-VkRenderPassBeginInfo-renderPass-00904");
        }
        skip |= InsideRenderPass(dev_data, cb_state, "vkCmdBeginRenderPass()", "VUID-vkCmdBeginRenderPass-renderpass");
        if (framebuffer) {
            skip |= ValidateDependencies(dev_data, framebuffer, render_pass_state);
        }
        skip |= ValidatePrimaryCommandBuffer(dev_data, cb_state, "vkCmdBeginRenderPass()", "VUID-vkCmdBeginRenderPass-bufferlevel");
        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdBeginRenderPass()", VK_QUEUE_GRAPHICS_BIT,
                                      "VUID-vkCmdBeginRenderPass-commandBuffer-cmdpool");
        skip |= ValidateCmd(dev_data, cb_state, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
    }
    return skip;
}
// Record state for vkCmdBeginRenderPass: mark the render pass/framebuffer/subpass active on the
// command buffer, bind the framebuffer and render pass objects to it, and transition attachment
// layouts for the start of the pass. No-op when |render_pass_state| is NULL.
static void PreCallRecordCmdBeginRenderPass(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, FRAMEBUFFER_STATE *framebuffer,
                                            RENDER_PASS_STATE *render_pass_state, const VkRenderPassBeginInfo *pRenderPassBegin,
                                            const VkSubpassContents contents) {
    assert(cb_state);
    if (!render_pass_state) {
        return;
    }
    cb_state->activeFramebuffer = pRenderPassBegin->framebuffer;
    cb_state->activeRenderPass = render_pass_state;
    cb_state->activeRenderPassBeginInfo = *pRenderPassBegin;  // shallow copy is all that is needed for now
    cb_state->activeSubpass = 0;
    cb_state->activeSubpassContents = contents;
    cb_state->framebuffers.insert(pRenderPassBegin->framebuffer);
    // Connect this framebuffer and its children to this cmdBuffer
    AddFramebufferBinding(dev_data, cb_state, framebuffer);
    // Connect this RP to cmdBuffer
    AddCommandBufferBinding(&render_pass_state->cb_bindings,
                            {HandleToUint64(render_pass_state->renderPass), kVulkanObjectTypeRenderPass}, cb_state);
    // transition attachments to the correct layouts for beginning of renderPass and first subpass
    TransitionBeginRenderPassLayouts(dev_data, cb_state, render_pass_state, framebuffer);
}
// vkCmdBeginRenderPass entry point: validate and record state under the global lock, then call
// down the chain only if validation passed.
VKAPI_ATTR void VKAPI_CALL CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                              VkSubpassContents contents) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = false;
    {
        unique_lock_t lock(global_lock);
        GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
        RENDER_PASS_STATE *render_pass_state = nullptr;
        FRAMEBUFFER_STATE *framebuffer = nullptr;
        if (pRenderPassBegin) {
            render_pass_state = GetRenderPassState(dev_data, pRenderPassBegin->renderPass);
            framebuffer = GetFramebufferState(dev_data, pRenderPassBegin->framebuffer);
        }
        if (cb_state) {
            skip |= PreCallValidateCmdBeginRenderPass(dev_data, render_pass_state, cb_state, framebuffer, pRenderPassBegin);
            if (!skip) {
                PreCallRecordCmdBeginRenderPass(dev_data, cb_state, framebuffer, render_pass_state, pRenderPassBegin, contents);
            }
        }
    }
    if (!skip) {
        dev_data->dispatch_table.CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
    }
}
// Validate vkCmdNextSubpass: primary-level buffer, graphics queue family, recording state,
// currently inside a render pass, and not already on the final subpass.
//
// Fix: cb_state->activeRenderPass is NULL when this is called outside a render pass; that error
// is already reported by OutsideRenderPass() above, so the subpass-count check is now guarded
// instead of dereferencing a NULL pointer.
static bool PreCallValidateCmdNextSubpass(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer) {
    bool skip = ValidatePrimaryCommandBuffer(dev_data, cb_state, "vkCmdNextSubpass()", "VUID-vkCmdNextSubpass-bufferlevel");
    skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdNextSubpass()", VK_QUEUE_GRAPHICS_BIT,
                                  "VUID-vkCmdNextSubpass-commandBuffer-cmdpool");
    skip |= ValidateCmd(dev_data, cb_state, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
    skip |= OutsideRenderPass(dev_data, cb_state, "vkCmdNextSubpass()", "VUID-vkCmdNextSubpass-renderpass");

    if (cb_state->activeRenderPass) {
        auto subpassCount = cb_state->activeRenderPass->createInfo.subpassCount;
        if (cb_state->activeSubpass == subpassCount - 1) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), "VUID-vkCmdNextSubpass-None-00909",
                            "vkCmdNextSubpass(): Attempted to advance beyond final subpass.");
        }
    }
    return skip;
}
// Record state for vkCmdNextSubpass: advance the active subpass index and transition the
// attachments to the layouts the new subpass expects.
static void PostCallRecordCmdNextSubpass(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkSubpassContents contents) {
    ++cb_node->activeSubpass;
    cb_node->activeSubpassContents = contents;
    auto framebuffer = GetFramebufferState(dev_data, cb_node->activeRenderPassBeginInfo.framebuffer);
    TransitionSubpassLayouts(dev_data, cb_node, cb_node->activeRenderPass, cb_node->activeSubpass, framebuffer);
}
// vkCmdNextSubpass entry point: validate under the lock, dispatch down the chain if validation
// passed, then re-acquire the lock to record the subpass advance.
VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
    bool skip = false;
    if (cb_state) {
        skip = PreCallValidateCmdNextSubpass(dev_data, cb_state, commandBuffer);
    }
    lock.unlock();
    if (skip) {
        return;
    }
    dev_data->dispatch_table.CmdNextSubpass(commandBuffer, contents);
    if (cb_state) {
        lock.lock();
        PostCallRecordCmdNextSubpass(dev_data, cb_state, contents);
    }
}
// Validate vkCmdEndRenderPass: the final subpass must have been reached, and the command buffer
// must be a primary buffer recording inside a render pass on a graphics queue.
static bool PreCallValidateCmdEndRenderPass(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer) {
    bool skip = false;
    RENDER_PASS_STATE *rp_state = cb_state->activeRenderPass;
    if (rp_state && (cb_state->activeSubpass != rp_state->createInfo.subpassCount - 1)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdEndRenderPass-None-00910",
                        "vkCmdEndRenderPass(): Called before reaching final subpass.");
    }
    skip |= OutsideRenderPass(dev_data, cb_state, "vkCmdEndRenderpass()", "VUID-vkCmdEndRenderPass-renderpass");
    skip |= ValidatePrimaryCommandBuffer(dev_data, cb_state, "vkCmdEndRenderPass()", "VUID-vkCmdEndRenderPass-bufferlevel");
    skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdEndRenderPass()", VK_QUEUE_GRAPHICS_BIT,
                                  "VUID-vkCmdEndRenderPass-commandBuffer-cmdpool");
    skip |= ValidateCmd(dev_data, cb_state, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
    return skip;
}
// Record state for vkCmdEndRenderPass: perform the final attachment layout transitions and
// clear the active render pass / subpass / framebuffer tracking on the command buffer.
static void PostCallRecordCmdEndRenderPass(layer_data *dev_data, GLOBAL_CB_NODE *cb_state) {
    auto framebuffer = GetFramebufferState(dev_data, cb_state->activeFramebuffer);
    TransitionFinalSubpassLayouts(dev_data, cb_state, &cb_state->activeRenderPassBeginInfo, framebuffer);
    cb_state->activeRenderPass = nullptr;
    cb_state->activeSubpass = 0;
    cb_state->activeFramebuffer = VK_NULL_HANDLE;
}
// vkCmdEndRenderPass entry point: validate under the lock, dispatch down the chain if validation
// passed, then re-acquire the lock to record the end-of-pass transitions.
VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    auto cb_state = GetCBNode(dev_data, commandBuffer);
    bool skip = false;
    if (cb_state) {
        skip = PreCallValidateCmdEndRenderPass(dev_data, cb_state, commandBuffer);
    }
    lock.unlock();
    if (skip) {
        return;
    }
    dev_data->dispatch_table.CmdEndRenderPass(commandBuffer);
    if (cb_state) {
        lock.lock();
        PostCallRecordCmdEndRenderPass(dev_data, cb_state);
    }
}
// If the secondary command buffer was recorded with a non-NULL inheritance framebuffer, that
// framebuffer must match the primary's currently active framebuffer and must be a known object.
static bool ValidateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
                                VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB, const char *caller) {
    bool skip = false;
    if (!pSubCB->beginInfo.pInheritanceInfo) {
        return skip;
    }
    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
    if (secondary_fb == VK_NULL_HANDLE) {
        // No inherited framebuffer specified -- nothing to cross-check.
        return skip;
    }
    VkFramebuffer primary_fb = pCB->activeFramebuffer;
    if (primary_fb != secondary_fb) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(primaryBuffer), "VUID-vkCmdExecuteCommands-pCommandBuffers-00099",
                        "vkCmdExecuteCommands() called w/ invalid secondary command buffer 0x%" PRIx64
                        " which has a framebuffer 0x%" PRIx64
                        " that is not the same as the primary command buffer's current active framebuffer 0x%" PRIx64 ".",
                        HandleToUint64(secondaryBuffer), HandleToUint64(secondary_fb), HandleToUint64(primary_fb));
    }
    auto fb = GetFramebufferState(dev_data, secondary_fb);
    if (!fb) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(primaryBuffer), kVUID_Core_DrawState_InvalidSecondaryCommandBuffer,
                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%" PRIx64
                        " which has invalid framebuffer 0x%" PRIx64 ".",
                        HandleToUint64(secondaryBuffer), HandleToUint64(secondary_fb));
    }
    return skip;
}
// Cross-check a secondary command buffer against the primary it is being executed from:
// inherited pipeline-statistics flags must cover any active pipeline-statistics query, the
// secondary must not have started a query of a type already active on the primary, and both
// buffers must come from pools of the same queue family.
static bool ValidateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
    bool skip = false;
    unordered_set<int> active_query_types;
    for (auto query_object : pCB->activeQueries) {
        auto pool_it = dev_data->queryPoolMap.find(query_object.pool);
        if (pool_it == dev_data->queryPoolMap.end()) {
            continue;
        }
        if (pool_it->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS && pSubCB->beginInfo.pInheritanceInfo) {
            VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
            if ((cmdBufStatistics & pool_it->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
                skip |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    HandleToUint64(pCB->commandBuffer), "VUID-vkCmdExecuteCommands-commandBuffer-00104",
                    "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%" PRIx64
                    " which has invalid active query pool 0x%" PRIx64
                    ". Pipeline statistics is being queried so the command buffer must have all bits set on the queryPool.",
                    HandleToUint64(pCB->commandBuffer), HandleToUint64(pool_it->first));
            }
        }
        active_query_types.insert(pool_it->second.createInfo.queryType);
    }
    for (auto query_object : pSubCB->startedQueries) {
        auto pool_it = dev_data->queryPoolMap.find(query_object.pool);
        if (pool_it != dev_data->queryPoolMap.end() && active_query_types.count(pool_it->second.createInfo.queryType)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidSecondaryCommandBuffer,
                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%" PRIx64
                            " which has invalid active query pool 0x%" PRIx64
                            " of type %d but a query of that type has been started on secondary Cmd Buffer 0x%" PRIx64 ".",
                            HandleToUint64(pCB->commandBuffer), HandleToUint64(pool_it->first),
                            pool_it->second.createInfo.queryType, HandleToUint64(pSubCB->commandBuffer));
        }
    }
    auto primary_pool = GetCommandPoolNode(dev_data, pCB->createInfo.commandPool);
    auto secondary_pool = GetCommandPoolNode(dev_data, pSubCB->createInfo.commandPool);
    if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(pSubCB->commandBuffer), kVUID_Core_DrawState_InvalidQueueFamily,
                        "vkCmdExecuteCommands(): Primary command buffer 0x%" PRIx64
                        " created in queue family %d has secondary command buffer 0x%" PRIx64 " created in queue family %d.",
                        HandleToUint64(pCB->commandBuffer), primary_pool->queueFamilyIndex, HandleToUint64(pSubCB->commandBuffer),
                        secondary_pool->queueFamilyIndex);
    }
    return skip;
}
// Validate vkCmdExecuteCommands for |cb_state| (the primary) against every buffer in
// |pCommandBuffers|. Checks: every element is a secondary buffer; when executed inside a render
// pass, the secondary has RENDER_PASS_CONTINUE_BIT and a compatible render pass/framebuffer;
// query and queue-family consistency; SIMULTANEOUS_USE rules; inherited-query support; and that
// each secondary's initial image layouts match the primary's current layouts.
// NOTE(review): despite the "Validate" name this function also mutates state (clears the
// primary's SIMULTANEOUS_USE flag, links the command buffers) -- see the TODOs below.
// Returns true if validation failed and the dispatch should be skipped.
static bool PreCallValidateCmdExecuteCommands(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer,
                                              uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
    bool skip = false;
    GLOBAL_CB_NODE *sub_cb_state = NULL;
    for (uint32_t i = 0; i < commandBuffersCount; i++) {
        sub_cb_state = GetCBNode(dev_data, pCommandBuffers[i]);
        assert(sub_cb_state);
        // Every element of pCommandBuffers must be a secondary command buffer.
        if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == sub_cb_state->createInfo.level) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCommandBuffers[i]), "VUID-vkCmdExecuteCommands-pCommandBuffers-00088",
                            "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%" PRIx64
                            " in element %u of pCommandBuffers array. All cmd buffers in pCommandBuffers array must be secondary.",
                            HandleToUint64(pCommandBuffers[i]), i);
        } else if (cb_state->activeRenderPass) {  // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
            if (sub_cb_state->beginInfo.pInheritanceInfo != nullptr) {
                auto secondary_rp_state = GetRenderPassState(dev_data, sub_cb_state->beginInfo.pInheritanceInfo->renderPass);
                if (!(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(pCommandBuffers[i]), "VUID-vkCmdExecuteCommands-pCommandBuffers-00096",
                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIx64 ") executed within render pass (0x%" PRIx64
                        ") must have had vkBeginCommandBuffer() called w/ "
                        "VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
                        HandleToUint64(pCommandBuffers[i]), HandleToUint64(cb_state->activeRenderPass->renderPass));
                } else {
                    // Make sure render pass is compatible with parent command buffer pass if has continue
                    if (cb_state->activeRenderPass->renderPass != secondary_rp_state->renderPass) {
                        skip |= ValidateRenderPassCompatibility(
                            dev_data, "primary command buffer", cb_state->activeRenderPass, "secondary command buffer",
                            secondary_rp_state, "vkCmdExecuteCommands()", "VUID-vkCmdExecuteCommands-pInheritanceInfo-00098");
                    }
                    // If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB
                    skip |= ValidateFramebuffer(dev_data, commandBuffer, cb_state, pCommandBuffers[i], sub_cb_state,
                                                "vkCmdExecuteCommands()");
                    if (!sub_cb_state->cmd_execute_commands_functions.empty()) {
                        // Inherit primary's activeFramebuffer and while running validate functions
                        for (auto &function : sub_cb_state->cmd_execute_commands_functions) {
                            skip |= function(cb_state, cb_state->activeFramebuffer);
                        }
                    }
                }
            }
        }
        // Query-state, queue-family, and general recorded-state consistency checks.
        // TODO(mlentine): Move more logic into this method
        skip |= ValidateSecondaryCommandBufferState(dev_data, cb_state, sub_cb_state);
        skip |= ValidateCommandBufferState(dev_data, sub_cb_state, "vkCmdExecuteCommands()", 0,
                                           "VUID-vkCmdExecuteCommands-pCommandBuffers-00089");
        // A secondary without SIMULTANEOUS_USE may not already be pending execution or linked in.
        if (!(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
            if (sub_cb_state->in_use.load() || cb_state->linkedCommandBuffers.count(sub_cb_state)) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdExecuteCommands-pCommandBuffers-00090",
                            "Attempt to simultaneously execute command buffer 0x%" PRIx64
                            " without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set!",
                            HandleToUint64(cb_state->commandBuffer));
            }
            if (cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
                // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCommandBuffers[i]), kVUID_Core_DrawState_InvalidCommandBufferSimultaneousUse,
                            "vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIx64
                            ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary "
                            "command buffer (0x%" PRIx64
                            ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set, even "
                            "though it does.",
                            HandleToUint64(pCommandBuffers[i]), HandleToUint64(cb_state->commandBuffer));
                // TODO: Clearing the VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT needs to be moved from the validation step to the
                // recording step
                cb_state->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
            }
        }
        // Executing a secondary with a query in flight requires the inheritedQueries feature.
        if (!cb_state->activeQueries.empty() && !dev_data->enabled_features.core.inheritedQueries) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCommandBuffers[i]), "VUID-vkCmdExecuteCommands-commandBuffer-00101",
                            "vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIx64
                            ") cannot be submitted with a query in flight and inherited queries not supported on this device.",
                            HandleToUint64(pCommandBuffers[i]));
        }
        // Propagate layout transitions to the primary cmd buffer
        // Novel Valid usage: "UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001"
        // initial layout usage of secondary command buffers resources must match parent command buffer
        for (const auto &ilm_entry : sub_cb_state->imageLayoutMap) {
            auto cb_entry = cb_state->imageLayoutMap.find(ilm_entry.first);
            if (cb_entry != cb_state->imageLayoutMap.end()) {
                // For exact matches ImageSubresourcePair matches, validate and update the parent entry
                if ((VK_IMAGE_LAYOUT_UNDEFINED != ilm_entry.second.initialLayout) &&
                    (cb_entry->second.layout != ilm_entry.second.initialLayout)) {
                    const VkImageSubresource &subresource = ilm_entry.first.subresource;
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCommandBuffers[i]), "UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001",
                            "%s: Cannot execute cmd buffer using image (0x%" PRIx64
                            ") [sub-resource: aspectMask 0x%X "
                            "array layer %u, mip level %u], with current layout %s when first use is %s.",
                            "vkCmdExecuteCommands():", HandleToUint64(ilm_entry.first.image), subresource.aspectMask,
                            subresource.arrayLayer, subresource.mipLevel, string_VkImageLayout(cb_entry->second.layout),
                            string_VkImageLayout(ilm_entry.second.initialLayout));
                }
            } else {
                // Look for partial matches (in aspectMask), and update or create parent map entry in SetLayout
                assert(ilm_entry.first.hasSubresource);
                IMAGE_CMD_BUF_LAYOUT_NODE node;
                if (FindCmdBufLayout(dev_data, cb_state, ilm_entry.first.image, ilm_entry.first.subresource, node)) {
                    if ((VK_IMAGE_LAYOUT_UNDEFINED != ilm_entry.second.initialLayout) &&
                        (node.layout != ilm_entry.second.initialLayout)) {
                        const VkImageSubresource &subresource = ilm_entry.first.subresource;
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCommandBuffers[i]),
                                "UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001",
                                "%s: Cannot execute cmd buffer using image (0x%" PRIx64
                                ") [sub-resource: aspectMask 0x%X "
                                "array layer %u, mip level %u], with current layout %s when first use is %s.",
                                "vkCmdExecuteCommands():", HandleToUint64(ilm_entry.first.image), subresource.aspectMask,
                                subresource.arrayLayer, subresource.mipLevel, string_VkImageLayout(node.layout),
                                string_VkImageLayout(ilm_entry.second.initialLayout));
                    }
                }
            }
        }
        // TODO: Linking command buffers here is necessary to pass existing validation tests--however, this state change still needs
        // to be removed from the validation step
        sub_cb_state->primaryCommandBuffer = cb_state->commandBuffer;
        cb_state->linkedCommandBuffers.insert(sub_cb_state);
        sub_cb_state->linkedCommandBuffers.insert(cb_state);
    }
    // Finally, checks on the primary itself.
    skip |= ValidatePrimaryCommandBuffer(dev_data, cb_state, "vkCmdExecuteCommands()", "VUID-vkCmdExecuteCommands-bufferlevel");
    skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdExecuteCommands()",
                                  VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdExecuteCommands-commandBuffer-cmdpool");
    skip |= ValidateCmd(dev_data, cb_state, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
    return skip;
}
// Record state for vkCmdExecuteCommands: for each secondary buffer, downgrade the primary's
// SIMULTANEOUS_USE flag when the secondary lacks it, propagate the secondary's image layout
// transitions into the primary's layout map, link the two command buffers, and inherit the
// secondary's deferred query/submit-time callbacks.
static void PreCallRecordCmdExecuteCommands(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, uint32_t commandBuffersCount,
                                            const VkCommandBuffer *pCommandBuffers) {
    GLOBAL_CB_NODE *sub_cb_state = NULL;
    for (uint32_t i = 0; i < commandBuffersCount; i++) {
        sub_cb_state = GetCBNode(dev_data, pCommandBuffers[i]);
        assert(sub_cb_state);
        if (!(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
            if (cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
                // TODO: Because this is a state change, clearing the VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT needs to be moved
                // from the validation step to the recording step
                cb_state->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
            }
        }
        // Propagate layout transitions to the primary cmd buffer
        // Novel Valid usage: "UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001"
        // initial layout usage of secondary command buffers resources must match parent command buffer
        for (const auto &ilm_entry : sub_cb_state->imageLayoutMap) {
            auto cb_entry = cb_state->imageLayoutMap.find(ilm_entry.first);
            if (cb_entry != cb_state->imageLayoutMap.end()) {
                // For exact matches ImageSubresourcePair matches, update the parent entry
                cb_entry->second.layout = ilm_entry.second.layout;
            } else {
                // Look for partial matches (in aspectMask), and update or create parent map entry in SetLayout
                assert(ilm_entry.first.hasSubresource);
                IMAGE_CMD_BUF_LAYOUT_NODE node;
                // When the primary has no prior layout for this subresource, seed the node's
                // initial layout from the secondary's.
                if (!FindCmdBufLayout(dev_data, cb_state, ilm_entry.first.image, ilm_entry.first.subresource, node)) {
                    node.initialLayout = ilm_entry.second.initialLayout;
                }
                node.layout = ilm_entry.second.layout;
                SetLayout(dev_data, cb_state, ilm_entry.first, node);
            }
        }
        // Link the secondary to this primary (and vice versa) for in-flight tracking.
        sub_cb_state->primaryCommandBuffer = cb_state->commandBuffer;
        cb_state->linkedCommandBuffers.insert(sub_cb_state);
        sub_cb_state->linkedCommandBuffers.insert(cb_state);
        // Inherit the secondary's deferred query updates and queue-submit-time callbacks.
        for (auto &function : sub_cb_state->queryUpdates) {
            cb_state->queryUpdates.push_back(function);
        }
        for (auto &function : sub_cb_state->queue_submit_functions) {
            cb_state->queue_submit_functions.push_back(function);
        }
    }
}
// vkCmdExecuteCommands entry point: validates the secondary command buffers against the
// primary and records state propagation under the global lock, then forwards the call
// down the dispatch chain unless validation failed.
VKAPI_ATTR void VKAPI_CALL CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
                                              const VkCommandBuffer *pCommandBuffers) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
    if (cb_state) {
        // TODO: State changes needs to be untangled from validation in PreCallValidationCmdExecuteCommands()
        skip |= PreCallValidateCmdExecuteCommands(dev_data, cb_state, commandBuffer, commandBuffersCount, pCommandBuffers);
        // Note: recording happens even when validation failed; only the dispatch below is skipped.
        PreCallRecordCmdExecuteCommands(dev_data, cb_state, commandBuffersCount, pCommandBuffers);
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
}
// Validation for vkMapMemory: checks image layouts within the span being mapped,
// requires the memory type to be HOST_VISIBLE, and validates the requested
// offset/size against the allocation via ValidateMapMemRange.
// Returns true if validation failed and the call should be skipped.
static bool PreCallValidateMapMemory(layer_data *dev_data, VkDevice device, VkDeviceMemory mem, VkDeviceSize offset,
                                     VkDeviceSize size) {
    bool skip = false;
    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
    if (mem_info) {
        // VK_WHOLE_SIZE maps through the end of the allocation; end_offset is inclusive.
        auto end_offset = (VK_WHOLE_SIZE == size) ? mem_info->alloc_info.allocationSize - 1 : offset + size - 1;
        skip |= ValidateMapImageLayouts(dev_data, device, mem_info, offset, end_offset);
        if ((dev_data->phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
            // NOTE(review): plain assignment (not |=) discards any earlier skip result -- confirm intended.
            skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           HandleToUint64(mem), "VUID-vkMapMemory-memory-00682",
                           "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIx64 ".",
                           HandleToUint64(mem));
        }
    }
    skip |= ValidateMapMemRange(dev_data, mem, offset, size);
    return skip;
}
// Record-side bookkeeping after a successful vkMapMemory: stores the mapped range
// and initializes shadow-copy tracking of the returned host pointer.
static void PostCallRecordMapMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
                                    void **ppData) {
    // TODO : What's the point of this range? See comment on creating new "bound_range" above, which may replace this
    StoreMemRanges(dev_data, mem, offset, size);
    InitializeAndTrackMemory(dev_data, mem, offset, size, ppData);
}
// vkMapMemory entry point: validate under the global lock, release it for the
// driver call, and re-take it to record the mapping only on VK_SUCCESS.
VKAPI_ATTR VkResult VKAPI_CALL MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
                                         void **ppData) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateMapMemory(dev_data, device, mem, offset, size);
    lock.unlock();
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    if (!skip) {
        result = dev_data->dispatch_table.MapMemory(device, mem, offset, size, flags, ppData);
        if (VK_SUCCESS == result) {
            lock.lock();
            PostCallRecordMapMemory(dev_data, mem, offset, size, ppData);
            lock.unlock();
        }
    }
    return result;
}
// Validation for vkUnmapMemory: the memory must currently be mapped, which this
// layer tracks via a non-zero mem_range.size (see PreCallRecordUnmapMemory).
// Returns true if validation failed and the call should be skipped.
static bool PreCallValidateUnmapMemory(const layer_data *dev_data, DEVICE_MEM_INFO *mem_info, const VkDeviceMemory mem) {
    bool skip = false;
    if (!mem_info->mem_range.size) {
        // Valid Usage: memory must currently be mapped
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        HandleToUint64(mem), "VUID-vkUnmapMemory-memory-00689",
                        "Unmapping Memory without memory being mapped: mem obj 0x%" PRIx64 ".", HandleToUint64(mem));
    }
    return skip;
}
// Clear the tracked mapped range and release any shadow buffer that was used
// for non-coherent memory bounds checking.
static void PreCallRecordUnmapMemory(DEVICE_MEM_INFO *mem_info) {
    // A zero range size marks this allocation as "not mapped".
    mem_info->mem_range.size = 0;
    if (!mem_info->shadow_copy) return;
    free(mem_info->shadow_copy_base);
    mem_info->shadow_copy_base = 0;
    mem_info->shadow_copy = 0;
}
// vkUnmapMemory entry point: validates that the memory is mapped, clears the
// layer's mapping/shadow state, then forwards the call unless validation failed.
VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    unique_lock_t lock(global_lock);
    auto mem_info = GetMemObjInfo(dev_data, mem);
    if (mem_info) {
        skip |= PreCallValidateUnmapMemory(dev_data, mem_info, mem);
        // Note: state is cleared even if validation flagged an error above.
        PreCallRecordUnmapMemory(mem_info);
    }
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.UnmapMemory(device, mem);
    }
}
// Shared validation for vkFlushMappedMemoryRanges / vkInvalidateMappedMemoryRanges:
// each range in pMemRanges must lie inside the region currently mapped on its
// memory object. Returns true if any range is out of bounds.
static bool ValidateMemoryIsMapped(layer_data *dev_data, const char *funcName, uint32_t memRangeCount,
                                   const VkMappedMemoryRange *pMemRanges) {
    bool skip = false;
    for (uint32_t i = 0; i < memRangeCount; ++i) {
        auto mem_info = GetMemObjInfo(dev_data, pMemRanges[i].memory);
        if (mem_info) {
            if (pMemRanges[i].size == VK_WHOLE_SIZE) {
                // Whole-size range: only the start needs checking against the mapped offset.
                if (mem_info->mem_range.offset > pMemRanges[i].offset) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        HandleToUint64(pMemRanges[i].memory), "VUID-VkMappedMemoryRange-size-00686",
                        "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER
                        ") is less than Memory Object's offset (" PRINTF_SIZE_T_SPECIFIER ").",
                        funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_info->mem_range.offset));
                }
            } else {
                // Explicit size: the range must fit within [mapped offset, mapped end).
                const uint64_t data_end = (mem_info->mem_range.size == VK_WHOLE_SIZE)
                                              ? mem_info->alloc_info.allocationSize
                                              : (mem_info->mem_range.offset + mem_info->mem_range.size);
                if ((mem_info->mem_range.offset > pMemRanges[i].offset) ||
                    (data_end < (pMemRanges[i].offset + pMemRanges[i].size))) {
                    skip |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                HandleToUint64(pMemRanges[i].memory), "VUID-VkMappedMemoryRange-size-00685",
                                "%s: Flush/Invalidate size or offset (" PRINTF_SIZE_T_SPECIFIER ", " PRINTF_SIZE_T_SPECIFIER
                                ") exceed the Memory Object's upper-bound (" PRINTF_SIZE_T_SPECIFIER ").",
                                funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
                                static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(data_end));
                }
            }
        }
    }
    return skip;
}
// For each range backed by a shadow copy (non-coherent memory tracking), verify
// the guard bytes on both sides of the user-visible region still hold the fill
// value (detecting buffer under/overflow by the app), then copy the user region
// into the driver-visible memory. Note this both validates AND performs the copy.
// Returns true if corruption of the guard bytes was detected.
static bool ValidateAndCopyNoncoherentMemoryToDriver(layer_data *dev_data, uint32_t mem_range_count,
                                                     const VkMappedMemoryRange *mem_ranges) {
    bool skip = false;
    for (uint32_t i = 0; i < mem_range_count; ++i) {
        auto mem_info = GetMemObjInfo(dev_data, mem_ranges[i].memory);
        if (mem_info) {
            if (mem_info->shadow_copy) {
                // VK_WHOLE_SIZE means "mapped offset through the end of the allocation".
                VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
                                        ? mem_info->mem_range.size
                                        : (mem_info->alloc_info.allocationSize - mem_info->mem_range.offset);
                char *data = static_cast<char *>(mem_info->shadow_copy);
                // Leading guard region: bytes [0, shadow_pad_size).
                for (uint64_t j = 0; j < mem_info->shadow_pad_size; ++j) {
                    if (data[j] != NoncoherentMemoryFillValue) {
                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem_ranges[i].memory),
                                        kVUID_Core_MemTrack_InvalidMap, "Memory underflow was detected on mem obj 0x%" PRIx64,
                                        HandleToUint64(mem_ranges[i].memory));
                    }
                }
                // Trailing guard region: bytes after the user region.
                for (uint64_t j = (size + mem_info->shadow_pad_size); j < (2 * mem_info->shadow_pad_size + size); ++j) {
                    if (data[j] != NoncoherentMemoryFillValue) {
                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem_ranges[i].memory),
                                        kVUID_Core_MemTrack_InvalidMap, "Memory overflow was detected on mem obj 0x%" PRIx64,
                                        HandleToUint64(mem_ranges[i].memory));
                    }
                }
                // Push the user region of the shadow buffer down to the real mapping.
                memcpy(mem_info->p_driver_data, static_cast<void *>(data + mem_info->shadow_pad_size), (size_t)(size));
            }
        }
    }
    return skip;
}
// Refresh the layer's shadow copy of each non-coherent range from the
// driver-visible memory (used after an invalidate so later checks compare
// against current contents).
static void CopyNoncoherentMemoryFromDriver(layer_data *dev_data, uint32_t mem_range_count, const VkMappedMemoryRange *mem_ranges) {
    for (uint32_t idx = 0; idx < mem_range_count; ++idx) {
        auto mem_info = GetMemObjInfo(dev_data, mem_ranges[idx].memory);
        if (!mem_info || !mem_info->shadow_copy) continue;
        // VK_WHOLE_SIZE means "from this range's offset to the end of the allocation".
        VkDeviceSize copy_size = mem_info->mem_range.size;
        if (copy_size == VK_WHOLE_SIZE) {
            copy_size = mem_info->alloc_info.allocationSize - mem_ranges[idx].offset;
        }
        char *shadow = static_cast<char *>(mem_info->shadow_copy);
        // User region of the shadow buffer starts after the leading guard pad.
        memcpy(shadow + mem_info->shadow_pad_size, mem_info->p_driver_data, (size_t)(copy_size));
    }
}
// Check each mapped memory range against VkPhysicalDeviceLimits::nonCoherentAtomSize:
// both offset and size must be multiples of the atom size (size is exempt when it is
// VK_WHOLE_SIZE or when offset+size reaches exactly the end of the allocation).
// Returns true if validation failed and the call should be skipped.
static bool ValidateMappedMemoryRangeDeviceLimits(layer_data *dev_data, const char *func_name, uint32_t mem_range_count,
                                                  const VkMappedMemoryRange *mem_ranges) {
    bool skip = false;
    // nonCoherentAtomSize is a device constant; hoist it out of the loop.
    const uint64_t atom_size = dev_data->phys_dev_properties.properties.limits.nonCoherentAtomSize;
    for (uint32_t i = 0; i < mem_range_count; ++i) {
        if (SafeModulo(mem_ranges[i].offset, atom_size) != 0) {
            // Log against the memory object of the offending range (was mem_ranges->memory,
            // which always reported element 0).
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            HandleToUint64(mem_ranges[i].memory), "VUID-VkMappedMemoryRange-offset-00687",
                            "%s: Offset in pMemRanges[%d] is 0x%" PRIxLEAST64
                            ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ").",
                            func_name, i, mem_ranges[i].offset, atom_size);
        }
        auto mem_info = GetMemObjInfo(dev_data, mem_ranges[i].memory);
        // mem_info may be null for an invalid/destroyed memory handle; guard the
        // dereference (other validation reports the bad handle itself).
        if ((mem_info != nullptr) && (mem_ranges[i].size != VK_WHOLE_SIZE) &&
            (mem_ranges[i].size + mem_ranges[i].offset != mem_info->alloc_info.allocationSize) &&
            (SafeModulo(mem_ranges[i].size, atom_size) != 0)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            HandleToUint64(mem_ranges[i].memory), "VUID-VkMappedMemoryRange-size-01390",
                            "%s: Size in pMemRanges[%d] is 0x%" PRIxLEAST64
                            ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ").",
                            func_name, i, mem_ranges[i].size, atom_size);
        }
    }
    return skip;
}
// Validation for vkFlushMappedMemoryRanges: atom-size alignment, shadow-buffer
// guard-byte checks, and mapped-bounds checks.
// Note: ValidateAndCopyNoncoherentMemoryToDriver also performs the copy into
// driver memory as a side effect, so validation and record are intertwined here.
// Returns true if the call should be skipped.
static bool PreCallValidateFlushMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
                                                   const VkMappedMemoryRange *mem_ranges) {
    bool skip = false;
    lock_guard_t lock(global_lock);
    skip |= ValidateMappedMemoryRangeDeviceLimits(dev_data, "vkFlushMappedMemoryRanges", mem_range_count, mem_ranges);
    skip |= ValidateAndCopyNoncoherentMemoryToDriver(dev_data, mem_range_count, mem_ranges);
    skip |= ValidateMemoryIsMapped(dev_data, "vkFlushMappedMemoryRanges", mem_range_count, mem_ranges);
    return skip;
}
// vkFlushMappedMemoryRanges entry point: validate, then forward to the driver
// unless validation failed.
VKAPI_ATTR VkResult VKAPI_CALL FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
                                                       const VkMappedMemoryRange *pMemRanges) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    const bool skip = PreCallValidateFlushMappedMemoryRanges(dev_data, memRangeCount, pMemRanges);
    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    return dev_data->dispatch_table.FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
}
// Validation for vkInvalidateMappedMemoryRanges: atom-size alignment and
// mapped-bounds checks (no shadow-copy guard check; that runs on flush).
// Returns true if the call should be skipped.
static bool PreCallValidateInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
                                                        const VkMappedMemoryRange *mem_ranges) {
    bool skip = false;
    lock_guard_t lock(global_lock);
    skip |= ValidateMappedMemoryRangeDeviceLimits(dev_data, "vkInvalidateMappedMemoryRanges", mem_range_count, mem_ranges);
    skip |= ValidateMemoryIsMapped(dev_data, "vkInvalidateMappedMemoryRanges", mem_range_count, mem_ranges);
    return skip;
}
// Record-side work after a successful vkInvalidateMappedMemoryRanges: refresh the
// layer's shadow copies from the driver-visible memory.
static void PostCallRecordInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
                                                       const VkMappedMemoryRange *mem_ranges) {
    lock_guard_t lock(global_lock);
    // Update our shadow copy with modified driver data
    CopyNoncoherentMemoryFromDriver(dev_data, mem_range_count, mem_ranges);
}
// vkInvalidateMappedMemoryRanges entry point: validate, forward to the driver,
// and on success refresh the layer's shadow copies from driver memory.
VKAPI_ATTR VkResult VKAPI_CALL InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
                                                            const VkMappedMemoryRange *pMemRanges) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (PreCallValidateInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    VkResult result = dev_data->dispatch_table.InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
    if (VK_SUCCESS == result) {
        PostCallRecordInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges);
    }
    return result;
}
// Validation for vkBindImageMemory / vkBindImageMemory2 (per bind info): checks the
// memory binding, memory range, memory type bits, alignment, size, and dedicated
// allocation rules for the given image. api_name distinguishes the calling entry
// point in messages. Returns true if validation failed and the call should be skipped.
static bool PreCallValidateBindImageMemory(layer_data *dev_data, VkImage image, IMAGE_STATE *image_state, VkDeviceMemory mem,
                                           VkDeviceSize memoryOffset, const char *api_name) {
    bool skip = false;
    if (image_state) {
        unique_lock_t lock(global_lock);
        // Track objects tied to memory
        uint64_t image_handle = HandleToUint64(image);
        skip = ValidateSetMemBinding(dev_data, mem, image_handle, kVulkanObjectTypeImage, api_name);
        if (!image_state->memory_requirements_checked) {
            // There's not an explicit requirement in the spec to call vkGetImageMemoryRequirements() prior to calling
            // BindImageMemory but it's implied in that memory being bound must conform with VkMemoryRequirements from
            // vkGetImageMemoryRequirements()
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            image_handle, kVUID_Core_DrawState_InvalidImage,
                            "%s: Binding memory to image 0x%" PRIx64
                            " but vkGetImageMemoryRequirements() has not been called on that image.",
                            api_name, HandleToUint64(image_handle));
            // Make the call for them so we can verify the state
            // (drop the lock around the driver call to avoid holding it across dispatch)
            lock.unlock();
            dev_data->dispatch_table.GetImageMemoryRequirements(dev_data->device, image, &image_state->requirements);
            lock.lock();
        }
        // Validate bound memory range information
        auto mem_info = GetMemObjInfo(dev_data, mem);
        if (mem_info) {
            skip |= ValidateInsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, image_state->requirements,
                                                   image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR, api_name);
            skip |= ValidateMemoryTypes(dev_data, mem_info, image_state->requirements.memoryTypeBits, api_name,
                                        "VUID-vkBindImageMemory-memory-01047");
        }
        // Validate memory requirements alignment
        if (SafeModulo(memoryOffset, image_state->requirements.alignment) != 0) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            image_handle, "VUID-vkBindImageMemory-memoryOffset-01048",
                            "%s: memoryOffset is 0x%" PRIxLEAST64
                            " but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
                            ", returned from a call to vkGetImageMemoryRequirements with image.",
                            api_name, memoryOffset, image_state->requirements.alignment);
        }
        if (mem_info) {
            // Validate memory requirements size
            if (image_state->requirements.size > mem_info->alloc_info.allocationSize - memoryOffset) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                image_handle, "VUID-vkBindImageMemory-size-01049",
                                "%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
                                " but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
                                ", returned from a call to vkGetImageMemoryRequirements with image.",
                                api_name, mem_info->alloc_info.allocationSize - memoryOffset, image_state->requirements.size);
            }
            // Validate dedicated allocation
            if (mem_info->is_dedicated && ((mem_info->dedicated_image != image) || (memoryOffset != 0))) {
                // TODO: Add vkBindImageMemory2KHR error message when added to spec.
                auto validation_error = kVUIDUndefined;
                if (strcmp(api_name, "vkBindImageMemory()") == 0) {
                    validation_error = "VUID-vkBindImageMemory-memory-01509";
                }
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                            image_handle, validation_error,
                            "%s: for dedicated memory allocation 0x%" PRIxLEAST64
                            ", VkMemoryDedicatedAllocateInfoKHR::image 0x%" PRIXLEAST64 " must be equal to image 0x%" PRIxLEAST64
                            " and memoryOffset 0x%" PRIxLEAST64 " must be zero.",
                            api_name, HandleToUint64(mem), HandleToUint64(mem_info->dedicated_image), image_handle, memoryOffset);
            }
        }
    }
    return skip;
}
// Record-side work after a successful image memory bind: records the bound memory
// range on the memory object and establishes the image<->memory binding.
static void PostCallRecordBindImageMemory(layer_data *dev_data, VkImage image, IMAGE_STATE *image_state, VkDeviceMemory mem,
                                          VkDeviceSize memoryOffset, const char *api_name) {
    if (image_state) {
        unique_lock_t lock(global_lock);
        // Track bound memory range information
        auto mem_info = GetMemObjInfo(dev_data, mem);
        if (mem_info) {
            InsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, image_state->requirements,
                                   image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR);
        }
        // Track objects tied to memory
        uint64_t image_handle = HandleToUint64(image);
        SetMemBinding(dev_data, mem, image_state, memoryOffset, image_handle, kVulkanObjectTypeImage, api_name);
    }
}
// vkBindImageMemory entry point: snapshot the image state under the lock,
// validate, forward to the driver, and record the binding on VK_SUCCESS.
VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    IMAGE_STATE *image_state = nullptr;
    {
        // Lookup only; validation takes the lock itself.
        unique_lock_t lock(global_lock);
        image_state = GetImageState(dev_data, image);
    }
    if (PreCallValidateBindImageMemory(dev_data, image, image_state, mem, memoryOffset, "vkBindImageMemory()")) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    VkResult result = dev_data->dispatch_table.BindImageMemory(device, image, mem, memoryOffset);
    if (VK_SUCCESS == result) {
        PostCallRecordBindImageMemory(dev_data, image, image_state, mem, memoryOffset, "vkBindImageMemory()");
    }
    return result;
}
// Validation for vkBindImageMemory2[KHR]: snapshots per-image state into the
// caller-provided vector (sized bindInfoCount), then validates each bind info
// with a per-element api_name for precise error messages.
// Returns true if any bind info failed validation.
static bool PreCallValidateBindImageMemory2(layer_data *dev_data, std::vector<IMAGE_STATE *> *image_state, uint32_t bindInfoCount,
                                            const VkBindImageMemoryInfoKHR *pBindInfos) {
    {
        // Snapshot image state pointers under the global lock; the per-bind
        // validation below manages its own locking.
        unique_lock_t lock(global_lock);
        for (uint32_t i = 0; i < bindInfoCount; i++) {
            (*image_state)[i] = GetImageState(dev_data, pBindInfos[i].image);
        }
    }
    bool skip = false;
    char api_name[128];
    for (uint32_t i = 0; i < bindInfoCount; i++) {
        // snprintf bounds the write to the buffer; sprintf could overflow if the
        // format string ever grows.
        snprintf(api_name, sizeof(api_name), "vkBindImageMemory2() pBindInfos[%u]", i);
        skip |= PreCallValidateBindImageMemory(dev_data, pBindInfos[i].image, (*image_state)[i], pBindInfos[i].memory,
                                               pBindInfos[i].memoryOffset, api_name);
    }
    return skip;
}
// Record-side work after a successful vkBindImageMemory2[KHR]: records each bind
// using the image state snapshot captured during validation.
static void PostCallRecordBindImageMemory2(layer_data *dev_data, const std::vector<IMAGE_STATE *> &image_state,
                                           uint32_t bindInfoCount, const VkBindImageMemoryInfoKHR *pBindInfos) {
    for (uint32_t i = 0; i < bindInfoCount; i++) {
        PostCallRecordBindImageMemory(dev_data, pBindInfos[i].image, image_state[i], pBindInfos[i].memory,
                                      pBindInfos[i].memoryOffset, "vkBindImageMemory2()");
    }
}
// vkBindImageMemory2 entry point: validate all bind infos, forward to the
// driver, and record the bindings on VK_SUCCESS.
VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory2(VkDevice device, uint32_t bindInfoCount,
                                                const VkBindImageMemoryInfoKHR *pBindInfos) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // Per-bind image state snapshot, filled during validation and reused for recording.
    std::vector<IMAGE_STATE *> image_state(bindInfoCount);
    if (PreCallValidateBindImageMemory2(dev_data, &image_state, bindInfoCount, pBindInfos)) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    VkResult result = dev_data->dispatch_table.BindImageMemory2(device, bindInfoCount, pBindInfos);
    if (VK_SUCCESS == result) {
        PostCallRecordBindImageMemory2(dev_data, image_state, bindInfoCount, pBindInfos);
    }
    return result;
}
// vkBindImageMemory2KHR entry point: identical flow to BindImageMemory2 but
// dispatches the KHR alias.
VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount,
                                                   const VkBindImageMemoryInfoKHR *pBindInfos) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // Per-bind image state snapshot, filled during validation and reused for recording.
    std::vector<IMAGE_STATE *> image_state(bindInfoCount);
    if (PreCallValidateBindImageMemory2(dev_data, &image_state, bindInfoCount, pBindInfos)) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    VkResult result = dev_data->dispatch_table.BindImageMemory2KHR(device, bindInfoCount, pBindInfos);
    if (VK_SUCCESS == result) {
        PostCallRecordBindImageMemory2(dev_data, image_state, bindInfoCount, pBindInfos);
    }
    return result;
}
// Validation for vkSetEvent: flags signaling an event that is still referenced by
// an in-flight command buffer (forward-progress hazard).
// NOTE(review): this also mutates event state (needsSignaled/stageMask) inside a
// validate function -- a state change that arguably belongs in the record step; confirm.
// Returns true if validation failed and the call should be skipped.
static bool PreCallValidateSetEvent(layer_data *dev_data, VkEvent event) {
    bool skip = false;
    auto event_state = GetEventNode(dev_data, event);
    if (event_state) {
        event_state->needsSignaled = false;
        event_state->stageMask = VK_PIPELINE_STAGE_HOST_BIT;
        if (event_state->write_in_use) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                            HandleToUint64(event), kVUID_Core_DrawState_QueueForwardProgress,
                            "Cannot call vkSetEvent() on event 0x%" PRIx64 " that is already in use by a command buffer.",
                            HandleToUint64(event));
        }
    }
    return skip;
}
// Record-side state update for vkSetEvent: host setting an event is visible to all
// queues immediately, so merge the HOST stage bit into every queue's recorded stage
// mask for this event.
// TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
// ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
static void PreCallRecordSetEvent(layer_data *dev_data, VkEvent event) {
    // Iterate by reference: the previous by-value iteration copied each queue's
    // state, so the |= below mutated the copy and the update was silently lost.
    for (auto &queue_data : dev_data->queueMap) {
        auto event_entry = queue_data.second.eventToStageMap.find(event);
        if (event_entry != queue_data.second.eventToStageMap.end()) {
            event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
        }
    }
}
// vkSetEvent entry point: validate and record under the global lock, then forward
// to the driver unless validation failed.
VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateSetEvent(dev_data, event);
    // Recording happens regardless of the validation result; only dispatch is skipped.
    PreCallRecordSetEvent(dev_data, event);
    lock.unlock();
    if (!skip) result = dev_data->dispatch_table.SetEvent(device, event);
    return result;
}
// Validation for vkQueueBindSparse: checks the fence, tracks wait/signal semaphore
// ordering across the bind infos to flag forward-progress hazards, and checks that
// sparse image bindings were preceded by a memory-requirements query and that
// metadata aspects are bound where required.
// NOTE(review): the `return log_msg(...)` statements inside the loops end validation
// at the first such warning, skipping the remaining bind infos -- confirm intended.
// Returns true if the call should be skipped.
static bool PreCallValidateQueueBindSparse(layer_data *dev_data, VkQueue queue, uint32_t bindInfoCount,
                                           const VkBindSparseInfo *pBindInfo, VkFence fence) {
    auto pFence = GetFenceNode(dev_data, fence);
    bool skip = ValidateFenceForSubmit(dev_data, pFence);
    if (skip) {
        return true;
    }
    // Semaphore state tracked *across* the whole pBindInfo array, so a signal in one
    // bind info can satisfy a wait in a later one.
    unordered_set<VkSemaphore> signaled_semaphores;
    unordered_set<VkSemaphore> unsignaled_semaphores;
    unordered_set<VkSemaphore> internal_semaphores;
    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
        std::vector<SEMAPHORE_WAIT> semaphore_waits;
        std::vector<VkSemaphore> semaphore_signals;
        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
            VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
            if (pSemaphore && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
                // Waiting on a semaphore that neither is signaled nor has a pending signal.
                if (unsignaled_semaphores.count(semaphore) ||
                    (!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled))) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                    HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress,
                                    "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
                                    HandleToUint64(queue), HandleToUint64(semaphore));
                } else {
                    signaled_semaphores.erase(semaphore);
                    unsignaled_semaphores.insert(semaphore);
                }
            }
            if (pSemaphore && pSemaphore->scope == kSyncScopeExternalTemporary) {
                internal_semaphores.insert(semaphore);
            }
        }
        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
            VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
            if (pSemaphore && pSemaphore->scope == kSyncScopeInternal) {
                // Signaling a semaphore that is already signaled and never waited on.
                if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                    HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress,
                                    "Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
                                    " that was previously signaled by queue 0x%" PRIx64
                                    " but has not since been waited on by any queue.",
                                    HandleToUint64(queue), HandleToUint64(semaphore), HandleToUint64(pSemaphore->signaler.first));
                } else {
                    unsignaled_semaphores.erase(semaphore);
                    signaled_semaphores.insert(semaphore);
                }
            }
        }
        // Store sparse binding image_state and after binding is complete make sure that any requiring metadata have it bound
        std::unordered_set<IMAGE_STATE *> sparse_images;
        // If we're binding sparse image memory make sure reqs were queried and note if metadata is required and bound
        for (uint32_t i = 0; i < bindInfo.imageBindCount; ++i) {
            const auto &image_bind = bindInfo.pImageBinds[i];
            auto image_state = GetImageState(dev_data, image_bind.image);
            if (!image_state)
                continue;  // Param/Object validation should report image_bind.image handles being invalid, so just skip here.
            sparse_images.insert(image_state);
            if (!image_state->get_sparse_reqs_called || image_state->sparse_requirements.empty()) {
                // For now just warning if sparse image binding occurs without calling to get reqs first
                return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                               HandleToUint64(image_state->image), kVUID_Core_MemTrack_InvalidState,
                               "vkQueueBindSparse(): Binding sparse memory to image 0x%" PRIx64
                               " without first calling vkGetImageSparseMemoryRequirements[2KHR]() to retrieve requirements.",
                               HandleToUint64(image_state->image));
            }
            for (uint32_t j = 0; j < image_bind.bindCount; ++j) {
                if (image_bind.pBinds[j].flags & VK_IMAGE_ASPECT_METADATA_BIT) {
                    image_state->sparse_metadata_bound = true;
                }
            }
        }
        for (uint32_t i = 0; i < bindInfo.imageOpaqueBindCount; ++i) {
            auto image_state = GetImageState(dev_data, bindInfo.pImageOpaqueBinds[i].image);
            if (!image_state)
                continue;  // Param/Object validation should report image_bind.image handles being invalid, so just skip here.
            sparse_images.insert(image_state);
            if (!image_state->get_sparse_reqs_called || image_state->sparse_requirements.empty()) {
                // For now just warning if sparse image binding occurs without calling to get reqs first
                return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                               HandleToUint64(image_state->image), kVUID_Core_MemTrack_InvalidState,
                               "vkQueueBindSparse(): Binding opaque sparse memory to image 0x%" PRIx64
                               " without first calling vkGetImageSparseMemoryRequirements[2KHR]() to retrieve requirements.",
                               HandleToUint64(image_state->image));
            }
        }
        for (const auto &sparse_image_state : sparse_images) {
            if (sparse_image_state->sparse_metadata_required && !sparse_image_state->sparse_metadata_bound) {
                // Warn if sparse image binding metadata required for image with sparse binding, but metadata not bound
                return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                               HandleToUint64(sparse_image_state->image), kVUID_Core_MemTrack_InvalidState,
                               "vkQueueBindSparse(): Binding sparse memory to image 0x%" PRIx64
                               " which requires a metadata aspect but no binding with VK_IMAGE_ASPECT_METADATA_BIT set was made.",
                               HandleToUint64(sparse_image_state->image));
            }
        }
    }
    return skip;
}
// Record-side work after vkQueueBindSparse: submits the fence, records sparse
// memory bindings for buffers and images, updates semaphore wait/signal state,
// and enqueues a (command-buffer-less) submission entry per bind info. External
// fences/semaphores trigger early retirement of queued work, since their wait
// will not be observed by this layer.
static void PostCallRecordQueueBindSparse(layer_data *dev_data, VkQueue queue, uint32_t bindInfoCount,
                                          const VkBindSparseInfo *pBindInfo, VkFence fence) {
    uint64_t early_retire_seq = 0;
    auto pFence = GetFenceNode(dev_data, fence);
    auto pQueue = GetQueueState(dev_data, queue);
    if (pFence) {
        if (pFence->scope == kSyncScopeInternal) {
            // The fence retires after all bind infos (at least one submission slot).
            SubmitFence(pQueue, pFence, std::max(1u, bindInfoCount));
            if (!bindInfoCount) {
                // No work to do, just dropping a fence in the queue by itself.
                pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(),
                                                 std::vector<VkSemaphore>(), std::vector<VkSemaphore>(), fence);
            }
        } else {
            // Retire work up until this fence early, we will not see the wait that corresponds to this signal
            early_retire_seq = pQueue->seq + pQueue->submissions.size();
            if (!dev_data->external_sync_warning) {
                dev_data->external_sync_warning = true;
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                        HandleToUint64(fence), kVUID_Core_DrawState_QueueForwardProgress,
                        "vkQueueBindSparse(): Signaling external fence 0x%" PRIx64 " on queue 0x%" PRIx64
                        " will disable validation of preceding command buffer lifecycle states and the in-use status of associated "
                        "objects.",
                        HandleToUint64(fence), HandleToUint64(queue));
            }
        }
    }
    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
        // Track objects tied to memory
        for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
                auto sparse_binding = bindInfo.pBufferBinds[j].pBinds[k];
                SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
                                    HandleToUint64(bindInfo.pBufferBinds[j].buffer), kVulkanObjectTypeBuffer);
            }
        }
        for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
                auto sparse_binding = bindInfo.pImageOpaqueBinds[j].pBinds[k];
                SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
                                    HandleToUint64(bindInfo.pImageOpaqueBinds[j].image), kVulkanObjectTypeImage);
            }
        }
        for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
                auto sparse_binding = bindInfo.pImageBinds[j].pBinds[k];
                // TODO: This size is broken for non-opaque bindings, need to update to comprehend full sparse binding data
                VkDeviceSize size = sparse_binding.extent.depth * sparse_binding.extent.height * sparse_binding.extent.width * 4;
                SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, size},
                                    HandleToUint64(bindInfo.pImageBinds[j].image), kVulkanObjectTypeImage);
            }
        }
        std::vector<SEMAPHORE_WAIT> semaphore_waits;
        std::vector<VkSemaphore> semaphore_signals;
        std::vector<VkSemaphore> semaphore_externals;
        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
            VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
            if (pSemaphore) {
                if (pSemaphore->scope == kSyncScopeInternal) {
                    if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
                        semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
                        pSemaphore->in_use.fetch_add(1);
                    }
                    // Consuming the wait clears the pending signal.
                    pSemaphore->signaler.first = VK_NULL_HANDLE;
                    pSemaphore->signaled = false;
                } else {
                    semaphore_externals.push_back(semaphore);
                    pSemaphore->in_use.fetch_add(1);
                    if (pSemaphore->scope == kSyncScopeExternalTemporary) {
                        // Temporary external payload reverts to internal scope once waited on.
                        pSemaphore->scope = kSyncScopeInternal;
                    }
                }
            }
        }
        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
            VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
            if (pSemaphore) {
                if (pSemaphore->scope == kSyncScopeInternal) {
                    pSemaphore->signaler.first = queue;
                    pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
                    pSemaphore->signaled = true;
                    pSemaphore->in_use.fetch_add(1);
                    semaphore_signals.push_back(semaphore);
                } else {
                    // Retire work up until this submit early, we will not see the wait that corresponds to this signal
                    early_retire_seq = std::max(early_retire_seq, pQueue->seq + pQueue->submissions.size() + 1);
                    if (!dev_data->external_sync_warning) {
                        dev_data->external_sync_warning = true;
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress,
                                "vkQueueBindSparse(): Signaling external semaphore 0x%" PRIx64 " on queue 0x%" PRIx64
                                " will disable validation of preceding command buffer lifecycle states and the in-use status of "
                                "associated objects.",
                                HandleToUint64(semaphore), HandleToUint64(queue));
                    }
                }
            }
        }
        // One submission per bind info; the fence is attached only to the last one.
        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), semaphore_waits, semaphore_signals, semaphore_externals,
                                         bindIdx == bindInfoCount - 1 ? fence : VK_NULL_HANDLE);
    }
    if (early_retire_seq) {
        RetireWorkOnQueue(dev_data, pQueue, early_retire_seq);
    }
}
// vkQueueBindSparse entry point: validate under the global lock; on failure return
// without dispatching, otherwise call the driver and record state afterwards.
VKAPI_ATTR VkResult VKAPI_CALL QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo,
                                               VkFence fence) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateQueueBindSparse(dev_data, queue, bindInfoCount, pBindInfo, fence);
    lock.unlock();
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
    lock.lock();
    PostCallRecordQueueBindSparse(dev_data, queue, bindInfoCount, pBindInfo, fence);
    lock.unlock();
    return result;
}
// Initialize layer tracking state for a newly created semaphore:
// no pending signaler, unsignaled, internally scoped.
static void PostCallRecordCreateSemaphore(layer_data *dev_data, VkSemaphore *pSemaphore) {
    SEMAPHORE_NODE &node = dev_data->semaphoreMap[*pSemaphore];
    node.signaler.first = VK_NULL_HANDLE;
    node.signaler.second = 0;
    node.signaled = false;
    node.scope = kSyncScopeInternal;
}
// Intercept vkCreateSemaphore: dispatch to the driver and, on success, set up
// the layer's tracking node for the new semaphore under the global lock.
VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    const VkResult result = device_data->dispatch_table.CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
    if (VK_SUCCESS != result) return result;
    lock_guard_t lock(global_lock);
    PostCallRecordCreateSemaphore(device_data, pSemaphore);
    return result;
}
// Validate an external-semaphore import: the target semaphore must not be in
// use by pending work. Returns true if the call should be skipped.
static bool PreCallValidateImportSemaphore(layer_data *dev_data, VkSemaphore semaphore, const char *caller_name) {
    bool skip = false;
    SEMAPHORE_NODE *node = GetSemaphoreNode(dev_data, semaphore);
    if (node) {
        VK_OBJECT obj_struct = {HandleToUint64(semaphore), kVulkanObjectTypeSemaphore};
        skip |= ValidateObjectNotInUse(dev_data, node, obj_struct, caller_name, kVUIDUndefined);
    }
    return skip;
}
// Record a semaphore import: Sync FD handles and TEMPORARY-flagged imports into an
// internally scoped semaphore become temporary-external; all other imports make the
// semaphore permanently external. A permanently external semaphore stays that way.
static void PostCallRecordImportSemaphore(layer_data *dev_data, VkSemaphore semaphore,
                                          VkExternalSemaphoreHandleTypeFlagBitsKHR handle_type, VkSemaphoreImportFlagsKHR flags) {
    SEMAPHORE_NODE *node = GetSemaphoreNode(dev_data, semaphore);
    if (!node || node->scope == kSyncScopeExternalPermanent) return;
    const bool temporary =
        (handle_type == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR || (flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR)) &&
        node->scope == kSyncScopeInternal;
    node->scope = temporary ? kSyncScopeExternalTemporary : kSyncScopeExternalPermanent;
}
#ifdef VK_USE_PLATFORM_WIN32_KHR
// Intercept vkImportSemaphoreWin32HandleKHR: validate, dispatch, and on success
// record the import's effect on the semaphore's sync scope.
VKAPI_ATTR VkResult VKAPI_CALL
ImportSemaphoreWin32HandleKHR(VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR *pImportSemaphoreWin32HandleInfo) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    const bool skip =
        PreCallValidateImportSemaphore(device_data, pImportSemaphoreWin32HandleInfo->semaphore, "vkImportSemaphoreWin32HandleKHR");
    if (!skip) {
        result = device_data->dispatch_table.ImportSemaphoreWin32HandleKHR(device, pImportSemaphoreWin32HandleInfo);
    }
    if (VK_SUCCESS == result) {
        PostCallRecordImportSemaphore(device_data, pImportSemaphoreWin32HandleInfo->semaphore,
                                      pImportSemaphoreWin32HandleInfo->handleType, pImportSemaphoreWin32HandleInfo->flags);
    }
    return result;
}
#endif
// Intercept vkImportSemaphoreFdKHR: validate, dispatch, and on success record
// the import's effect on the semaphore's sync scope.
VKAPI_ATTR VkResult VKAPI_CALL ImportSemaphoreFdKHR(VkDevice device, const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    const bool skip = PreCallValidateImportSemaphore(device_data, pImportSemaphoreFdInfo->semaphore, "vkImportSemaphoreFdKHR");
    if (!skip) {
        result = device_data->dispatch_table.ImportSemaphoreFdKHR(device, pImportSemaphoreFdInfo);
    }
    if (VK_SUCCESS == result) {
        PostCallRecordImportSemaphore(device_data, pImportSemaphoreFdInfo->semaphore, pImportSemaphoreFdInfo->handleType,
                                      pImportSemaphoreFdInfo->flags);
    }
    return result;
}
// Record a semaphore export. Reference-transference exports (anything other than
// Sync FD) make the semaphore permanently external and untrackable; Sync FD
// handles have copy transference so tracking continues.
static void PostCallRecordGetSemaphore(layer_data *dev_data, VkSemaphore semaphore,
                                       VkExternalSemaphoreHandleTypeFlagBitsKHR handle_type) {
    SEMAPHORE_NODE *node = GetSemaphoreNode(dev_data, semaphore);
    if (nullptr == node) return;
    if (VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR == handle_type) return;  // copy transference: still trackable
    node->scope = kSyncScopeExternalPermanent;
}
#ifdef VK_USE_PLATFORM_WIN32_KHR
// Intercept vkGetSemaphoreWin32HandleKHR: dispatch, then on success record the
// export's effect on the semaphore's sync scope.
VKAPI_ATTR VkResult VKAPI_CALL GetSemaphoreWin32HandleKHR(VkDevice device,
                                                          const VkSemaphoreGetWin32HandleInfoKHR *pGetWin32HandleInfo,
                                                          HANDLE *pHandle) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    const VkResult result = device_data->dispatch_table.GetSemaphoreWin32HandleKHR(device, pGetWin32HandleInfo, pHandle);
    if (VK_SUCCESS == result) {
        PostCallRecordGetSemaphore(device_data, pGetWin32HandleInfo->semaphore, pGetWin32HandleInfo->handleType);
    }
    return result;
}
#endif
// Intercept vkGetSemaphoreFdKHR: dispatch, then on success record the export's
// effect on the semaphore's sync scope.
VKAPI_ATTR VkResult VKAPI_CALL GetSemaphoreFdKHR(VkDevice device, const VkSemaphoreGetFdInfoKHR *pGetFdInfo, int *pFd) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    const VkResult result = device_data->dispatch_table.GetSemaphoreFdKHR(device, pGetFdInfo, pFd);
    if (VK_SUCCESS == result) {
        PostCallRecordGetSemaphore(device_data, pGetFdInfo->semaphore, pGetFdInfo->handleType);
    }
    return result;
}
// Validate a fence import: an internally scoped fence that is currently in
// flight must not have an external payload imported into it. Returns true if
// the call should be skipped.
static bool PreCallValidateImportFence(layer_data *dev_data, VkFence fence, const char *caller_name) {
    FENCE_NODE *node = GetFenceNode(dev_data, fence);
    const bool in_flight_internal = node && node->scope == kSyncScopeInternal && node->state == FENCE_INFLIGHT;
    if (!in_flight_internal) return false;
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                   HandleToUint64(fence), kVUIDUndefined, "Cannot call %s on fence 0x%" PRIx64 " that is currently in use.",
                   caller_name, HandleToUint64(fence));
}
// Record a fence import: Sync FD handles and TEMPORARY-flagged imports into an
// internally scoped fence become temporary-external; all other imports make the
// fence permanently external. A permanently external fence stays that way.
static void PostCallRecordImportFence(layer_data *dev_data, VkFence fence, VkExternalFenceHandleTypeFlagBitsKHR handle_type,
                                      VkFenceImportFlagsKHR flags) {
    FENCE_NODE *node = GetFenceNode(dev_data, fence);
    if (!node || node->scope == kSyncScopeExternalPermanent) return;
    const bool temporary =
        (handle_type == VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR || (flags & VK_FENCE_IMPORT_TEMPORARY_BIT_KHR)) &&
        node->scope == kSyncScopeInternal;
    node->scope = temporary ? kSyncScopeExternalTemporary : kSyncScopeExternalPermanent;
}
#ifdef VK_USE_PLATFORM_WIN32_KHR
// Intercept vkImportFenceWin32HandleKHR: validate, dispatch, and on success
// record the import's effect on the fence's sync scope.
VKAPI_ATTR VkResult VKAPI_CALL ImportFenceWin32HandleKHR(VkDevice device,
                                                         const VkImportFenceWin32HandleInfoKHR *pImportFenceWin32HandleInfo) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    const bool skip = PreCallValidateImportFence(device_data, pImportFenceWin32HandleInfo->fence, "vkImportFenceWin32HandleKHR");
    if (!skip) {
        result = device_data->dispatch_table.ImportFenceWin32HandleKHR(device, pImportFenceWin32HandleInfo);
    }
    if (VK_SUCCESS == result) {
        PostCallRecordImportFence(device_data, pImportFenceWin32HandleInfo->fence, pImportFenceWin32HandleInfo->handleType,
                                  pImportFenceWin32HandleInfo->flags);
    }
    return result;
}
#endif
// Intercept vkImportFenceFdKHR: validate, dispatch, and on success record the
// import's effect on the fence's sync scope.
VKAPI_ATTR VkResult VKAPI_CALL ImportFenceFdKHR(VkDevice device, const VkImportFenceFdInfoKHR *pImportFenceFdInfo) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    const bool skip = PreCallValidateImportFence(device_data, pImportFenceFdInfo->fence, "vkImportFenceFdKHR");
    if (!skip) {
        result = device_data->dispatch_table.ImportFenceFdKHR(device, pImportFenceFdInfo);
    }
    if (VK_SUCCESS == result) {
        PostCallRecordImportFence(device_data, pImportFenceFdInfo->fence, pImportFenceFdInfo->handleType,
                                  pImportFenceFdInfo->flags);
    }
    return result;
}
// Record a fence export. Reference-transference exports make the fence
// permanently external; Sync FD export (copy transference) instead resets an
// internally scoped fence as a side effect.
static void PostCallRecordGetFence(layer_data *dev_data, VkFence fence, VkExternalFenceHandleTypeFlagBitsKHR handle_type) {
    FENCE_NODE *node = GetFenceNode(dev_data, fence);
    if (!node) return;
    if (handle_type != VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR) {
        // Export with reference transference becomes external
        node->scope = kSyncScopeExternalPermanent;
    } else if (node->scope == kSyncScopeInternal) {
        // Export with copy transference has a side effect of resetting the fence
        node->state = FENCE_UNSIGNALED;
    }
}
#ifdef VK_USE_PLATFORM_WIN32_KHR
// Intercept vkGetFenceWin32HandleKHR: dispatch, then on success record the
// export's effect on the fence's tracking state.
VKAPI_ATTR VkResult VKAPI_CALL GetFenceWin32HandleKHR(VkDevice device, const VkFenceGetWin32HandleInfoKHR *pGetWin32HandleInfo,
                                                      HANDLE *pHandle) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    const VkResult result = device_data->dispatch_table.GetFenceWin32HandleKHR(device, pGetWin32HandleInfo, pHandle);
    if (VK_SUCCESS == result) {
        PostCallRecordGetFence(device_data, pGetWin32HandleInfo->fence, pGetWin32HandleInfo->handleType);
    }
    return result;
}
#endif
// Intercept vkGetFenceFdKHR: dispatch, then on success record the export's
// effect on the fence's tracking state.
VKAPI_ATTR VkResult VKAPI_CALL GetFenceFdKHR(VkDevice device, const VkFenceGetFdInfoKHR *pGetFdInfo, int *pFd) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    const VkResult result = device_data->dispatch_table.GetFenceFdKHR(device, pGetFdInfo, pFd);
    if (VK_SUCCESS == result) {
        PostCallRecordGetFence(device_data, pGetFdInfo->fence, pGetFdInfo->handleType);
    }
    return result;
}
// Initialize layer tracking state for a newly created event.
// Improvement: the original indexed dev_data->eventMap[*pEvent] three separate
// times, probing the hash map on every assignment; hoist the lookup into a
// single reference. Behavior is unchanged.
static void PostCallRecordCreateEvent(layer_data *dev_data, VkEvent *pEvent) {
    auto &event_node = dev_data->eventMap[*pEvent];
    event_node.needsSignaled = false;
    event_node.write_in_use = 0;
    event_node.stageMask = VkPipelineStageFlags(0);
}
// Intercept vkCreateEvent: dispatch to the driver and, on success, set up the
// layer's tracking entry for the new event under the global lock.
VKAPI_ATTR VkResult VKAPI_CALL CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo,
                                           const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    const VkResult result = device_data->dispatch_table.CreateEvent(device, pCreateInfo, pAllocator, pEvent);
    if (VK_SUCCESS != result) return result;
    lock_guard_t lock(global_lock);
    PostCallRecordCreateEvent(device_data, pEvent);
    return result;
}
// Validate vkCreateSwapchainKHR parameters against previously queried surface
// capabilities, surface formats, and present modes. Returns true if the call
// should be skipped (validation error with error-level reporting enabled).
// Fixes vs. original: message builders use bounded snprintf instead of sprintf
// into a fixed char[1024] (func_name is caller-supplied, so the old code could
// overflow), and the per-bit scans shift 1u instead of 1 so that testing bit 31
// of the capability masks is not signed-overflow undefined behavior.
static bool PreCallValidateCreateSwapchainKHR(layer_data *dev_data, const char *func_name,
                                              VkSwapchainCreateInfoKHR const *pCreateInfo, SURFACE_STATE *surface_state,
                                              SWAPCHAIN_NODE *old_swapchain_state) {
    auto most_recent_swapchain = surface_state->swapchain ? surface_state->swapchain : surface_state->old_swapchain;

    // TODO: revisit this. some of these rules are being relaxed.

    // All physical devices and queue families are required to be able
    // to present to any native window on Android; require the
    // application to have established support on any other platform.
    if (!dev_data->instance_data->extensions.vk_khr_android_surface) {
        auto support_predicate = [dev_data](decltype(surface_state->gpu_queue_support)::value_type qs) -> bool {
            // TODO: should restrict search only to queue families of VkDeviceQueueCreateInfos, not whole phys. device
            return (qs.first.gpu == dev_data->physical_device) && qs.second;
        };
        const auto &support = surface_state->gpu_queue_support;
        bool is_supported = std::any_of(support.begin(), support.end(), support_predicate);

        if (!is_supported) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), "VUID-VkSwapchainCreateInfoKHR-surface-01270",
                        "%s: pCreateInfo->surface is not known at this time to be supported for presentation by this device. The "
                        "vkGetPhysicalDeviceSurfaceSupportKHR() must be called beforehand, and it must return VK_TRUE support with "
                        "this surface for at least one queue family of this device.",
                        func_name))
                return true;
        }
    }

    // The surface must not already own a live swapchain other than the one being replaced.
    if (most_recent_swapchain != old_swapchain_state || (surface_state->old_swapchain && surface_state->swapchain)) {
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                    HandleToUint64(dev_data->device), kVUID_Core_DrawState_SwapchainAlreadyExists,
                    "%s: surface has an existing swapchain other than oldSwapchain", func_name))
            return true;
    }
    if (old_swapchain_state && old_swapchain_state->createInfo.surface != pCreateInfo->surface) {
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                    HandleToUint64(pCreateInfo->oldSwapchain), kVUID_Core_DrawState_SwapchainWrongSurface,
                    "%s: pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface", func_name))
            return true;
    }
    if ((pCreateInfo->imageExtent.width == 0) || (pCreateInfo->imageExtent.height == 0)) {
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                    HandleToUint64(dev_data->device), "VUID-VkSwapchainCreateInfoKHR-imageExtent-01689",
                    "%s: pCreateInfo->imageExtent = (%d, %d) which is illegal.", func_name, pCreateInfo->imageExtent.width,
                    pCreateInfo->imageExtent.height))
            return true;
    }
    auto physical_device_state = GetPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
    if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState == UNCALLED) {
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                    HandleToUint64(dev_data->physical_device), kVUID_Core_DrawState_SwapchainCreateBeforeQuery,
                    "%s: surface capabilities not retrieved for this physical device", func_name))
            return true;
    } else {  // have valid capabilities
        auto &capabilities = physical_device_state->surfaceCapabilities;
        // Validate pCreateInfo->minImageCount against VkSurfaceCapabilitiesKHR::{min|max}ImageCount:
        if (pCreateInfo->minImageCount < capabilities.minImageCount) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), "VUID-VkSwapchainCreateInfoKHR-minImageCount-01271",
                        "%s called with minImageCount = %d, which is outside the bounds returned by "
                        "vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d).",
                        func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount))
                return true;
        }

        // maxImageCount == 0 means "no limit", so only check when it is non-zero.
        if ((capabilities.maxImageCount > 0) && (pCreateInfo->minImageCount > capabilities.maxImageCount)) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), "VUID-VkSwapchainCreateInfoKHR-minImageCount-01272",
                        "%s called with minImageCount = %d, which is outside the bounds returned by "
                        "vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d).",
                        func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount))
                return true;
        }

        // Validate pCreateInfo->imageExtent against VkSurfaceCapabilitiesKHR::{current|min|max}ImageExtent:
        if ((pCreateInfo->imageExtent.width < capabilities.minImageExtent.width) ||
            (pCreateInfo->imageExtent.width > capabilities.maxImageExtent.width) ||
            (pCreateInfo->imageExtent.height < capabilities.minImageExtent.height) ||
            (pCreateInfo->imageExtent.height > capabilities.maxImageExtent.height)) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), "VUID-VkSwapchainCreateInfoKHR-imageExtent-01274",
                        "%s called with imageExtent = (%d,%d), which is outside the bounds returned by "
                        "vkGetPhysicalDeviceSurfaceCapabilitiesKHR(): currentExtent = (%d,%d), minImageExtent = (%d,%d), "
                        "maxImageExtent = (%d,%d).",
                        func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height,
                        capabilities.currentExtent.width, capabilities.currentExtent.height, capabilities.minImageExtent.width,
                        capabilities.minImageExtent.height, capabilities.maxImageExtent.width, capabilities.maxImageExtent.height))
                return true;
        }
        // pCreateInfo->preTransform should have exactly one bit set, and that bit must also be set in
        // VkSurfaceCapabilitiesKHR::supportedTransforms.
        if (!pCreateInfo->preTransform || (pCreateInfo->preTransform & (pCreateInfo->preTransform - 1)) ||
            !(pCreateInfo->preTransform & capabilities.supportedTransforms)) {
            // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message. Build
            // it up a little at a time, and then log it:
            std::string errorString = "";
            char str[1024];
            // Here's the first part of the message:
            snprintf(str, sizeof(str), "%s called with a non-supported pCreateInfo->preTransform (i.e. %s). Supported values are:\n",
                     func_name, string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform));
            errorString += str;
            for (int i = 0; i < 32; i++) {
                // Build up the rest of the message:
                if ((1u << i) & capabilities.supportedTransforms) {
                    const char *newStr = string_VkSurfaceTransformFlagBitsKHR((VkSurfaceTransformFlagBitsKHR)(1u << i));
                    snprintf(str, sizeof(str), "    %s\n", newStr);
                    errorString += str;
                }
            }
            // Log the message that we've built up:
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), "VUID-VkSwapchainCreateInfoKHR-preTransform-01279", "%s.",
                        errorString.c_str()))
                return true;
        }

        // pCreateInfo->compositeAlpha should have exactly one bit set, and that bit must also be set in
        // VkSurfaceCapabilitiesKHR::supportedCompositeAlpha
        if (!pCreateInfo->compositeAlpha || (pCreateInfo->compositeAlpha & (pCreateInfo->compositeAlpha - 1)) ||
            !((pCreateInfo->compositeAlpha) & capabilities.supportedCompositeAlpha)) {
            // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message. Build
            // it up a little at a time, and then log it:
            std::string errorString = "";
            char str[1024];
            // Here's the first part of the message:
            snprintf(str, sizeof(str), "%s called with a non-supported pCreateInfo->compositeAlpha (i.e. %s). Supported values are:\n",
                     func_name, string_VkCompositeAlphaFlagBitsKHR(pCreateInfo->compositeAlpha));
            errorString += str;
            for (int i = 0; i < 32; i++) {
                // Build up the rest of the message:
                if ((1u << i) & capabilities.supportedCompositeAlpha) {
                    const char *newStr = string_VkCompositeAlphaFlagBitsKHR((VkCompositeAlphaFlagBitsKHR)(1u << i));
                    snprintf(str, sizeof(str), "    %s\n", newStr);
                    errorString += str;
                }
            }
            // Log the message that we've built up:
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), "VUID-VkSwapchainCreateInfoKHR-compositeAlpha-01280", "%s.",
                        errorString.c_str()))
                return true;
        }
        // Validate pCreateInfo->imageArrayLayers against VkSurfaceCapabilitiesKHR::maxImageArrayLayers:
        if (pCreateInfo->imageArrayLayers > capabilities.maxImageArrayLayers) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), "VUID-VkSwapchainCreateInfoKHR-imageArrayLayers-01275",
                        "%s called with a non-supported imageArrayLayers (i.e. %d).  Maximum value is %d.", func_name,
                        pCreateInfo->imageArrayLayers, capabilities.maxImageArrayLayers))
                return true;
        }
        // Validate pCreateInfo->imageUsage against VkSurfaceCapabilitiesKHR::supportedUsageFlags:
        if (pCreateInfo->imageUsage != (pCreateInfo->imageUsage & capabilities.supportedUsageFlags)) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), "VUID-VkSwapchainCreateInfoKHR-imageUsage-01276",
                        "%s called with a non-supported pCreateInfo->imageUsage (i.e. 0x%08x).  Supported flag bits are 0x%08x.",
                        func_name, pCreateInfo->imageUsage, capabilities.supportedUsageFlags))
                return true;
        }
    }

    // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfaceFormatsKHR():
    if (physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState != QUERY_DETAILS) {
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                    HandleToUint64(dev_data->device), kVUID_Core_DrawState_SwapchainCreateBeforeQuery,
                    "%s called before calling vkGetPhysicalDeviceSurfaceFormatsKHR().", func_name))
            return true;
    } else {
        // Validate pCreateInfo->imageFormat against VkSurfaceFormatKHR::format:
        bool foundFormat = false;
        bool foundColorSpace = false;
        bool foundMatch = false;
        for (auto const &format : physical_device_state->surface_formats) {
            if (pCreateInfo->imageFormat == format.format) {
                // Validate pCreateInfo->imageColorSpace against VkSurfaceFormatKHR::colorSpace:
                foundFormat = true;
                if (pCreateInfo->imageColorSpace == format.colorSpace) {
                    foundMatch = true;
                    break;
                }
            } else {
                if (pCreateInfo->imageColorSpace == format.colorSpace) {
                    foundColorSpace = true;
                }
            }
        }
        if (!foundMatch) {
            if (!foundFormat) {
                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            HandleToUint64(dev_data->device), "VUID-VkSwapchainCreateInfoKHR-imageFormat-01273",
                            "%s called with a non-supported pCreateInfo->imageFormat (i.e. %d).", func_name,
                            pCreateInfo->imageFormat))
                    return true;
            }
            if (!foundColorSpace) {
                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            HandleToUint64(dev_data->device), "VUID-VkSwapchainCreateInfoKHR-imageFormat-01273",
                            "%s called with a non-supported pCreateInfo->imageColorSpace (i.e. %d).", func_name,
                            pCreateInfo->imageColorSpace))
                    return true;
            }
        }
    }

    // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfacePresentModesKHR():
    if (physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState != QUERY_DETAILS) {
        // FIFO is required to always be supported
        if (pCreateInfo->presentMode != VK_PRESENT_MODE_FIFO_KHR) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), kVUID_Core_DrawState_SwapchainCreateBeforeQuery,
                        "%s called before calling vkGetPhysicalDeviceSurfacePresentModesKHR().", func_name))
                return true;
        }
    } else {
        // Validate pCreateInfo->presentMode against vkGetPhysicalDeviceSurfacePresentModesKHR():
        bool foundMatch = std::find(physical_device_state->present_modes.begin(), physical_device_state->present_modes.end(),
                                    pCreateInfo->presentMode) != physical_device_state->present_modes.end();
        if (!foundMatch) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), "VUID-VkSwapchainCreateInfoKHR-presentMode-01281",
                        "%s called with a non-supported presentMode (i.e. %s).", func_name,
                        string_VkPresentModeKHR(pCreateInfo->presentMode)))
                return true;
        }
    }
    // Validate state for shared presentable case
    if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode ||
        VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) {
        if (!dev_data->extensions.vk_khr_shared_presentable_image) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), kVUID_Core_DrawState_ExtensionNotEnabled,
                        "%s called with presentMode %s which requires the VK_KHR_shared_presentable_image extension, which has not "
                        "been enabled.",
                        func_name, string_VkPresentModeKHR(pCreateInfo->presentMode)))
                return true;
        } else if (pCreateInfo->minImageCount != 1) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), "VUID-VkSwapchainCreateInfoKHR-minImageCount-01383",
                        "%s called with presentMode %s, but minImageCount value is %d. For shared presentable image, minImageCount "
                        "must be 1.",
                        func_name, string_VkPresentModeKHR(pCreateInfo->presentMode), pCreateInfo->minImageCount))
                return true;
        }
    }

    return false;
}
// Record the outcome of vkCreateSwapchainKHR: on success, create and register a
// SWAPCHAIN_NODE and point the surface at it; on failure, clear the surface's
// current-swapchain pointer. Either way, oldSwapchain is marked replaced.
static void PostCallRecordCreateSwapchainKHR(layer_data *dev_data, VkResult result, const VkSwapchainCreateInfoKHR *pCreateInfo,
                                             VkSwapchainKHR *pSwapchain, SURFACE_STATE *surface_state,
                                             SWAPCHAIN_NODE *old_swapchain_state) {
    if (result == VK_SUCCESS) {
        lock_guard_t lock(global_lock);
        unique_ptr<SWAPCHAIN_NODE> new_swapchain_state(new SWAPCHAIN_NODE(pCreateInfo, *pSwapchain));
        const bool is_shared_presentable = (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode) ||
                                           (VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode);
        if (is_shared_presentable) {
            new_swapchain_state->shared_presentable = true;
        }
        surface_state->swapchain = new_swapchain_state.get();
        dev_data->swapchainMap[*pSwapchain] = std::move(new_swapchain_state);
    } else {
        surface_state->swapchain = nullptr;
    }
    // Spec requires that even if CreateSwapchainKHR fails, oldSwapchain behaves as replaced.
    if (old_swapchain_state) {
        old_swapchain_state->replaced = true;
    }
    surface_state->old_swapchain = old_swapchain_state;
}
// Intercept vkCreateSwapchainKHR: validate against surface state, dispatch to
// the driver, then record the new swapchain (or the failure) in layer state.
// Fix: the func_name string passed to validation was "vkCreateSwapChainKHR()"
// (capital 'C' in "Chain"), so every resulting error message named a function
// that does not exist in the Vulkan API; corrected to "vkCreateSwapchainKHR()".
VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
                                                  const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    auto surface_state = GetSurfaceState(dev_data->instance_data, pCreateInfo->surface);
    auto old_swapchain_state = GetSwapchainNode(dev_data, pCreateInfo->oldSwapchain);

    if (PreCallValidateCreateSwapchainKHR(dev_data, "vkCreateSwapchainKHR()", pCreateInfo, surface_state, old_swapchain_state)) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkResult result = dev_data->dispatch_table.CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);

    PostCallRecordCreateSwapchainKHR(dev_data, result, pCreateInfo, pSwapchain, surface_state, old_swapchain_state);

    return result;
}
// Remove all layer tracking state tied to a swapchain before it is destroyed:
// the per-subresource layout records for each presentable image, the images'
// memory-binding records and IMAGE_STATE entries, the owning surface's
// back-pointers to this swapchain, and finally the swapchain's own map entry.
// Called with global_lock held (see DestroySwapchainKHR).
static void PreCallRecordDestroySwapchainKHR(layer_data *dev_data, const VkSwapchainKHR swapchain) {
    auto swapchain_data = GetSwapchainNode(dev_data, swapchain);
    if (swapchain_data) {
        if (swapchain_data->images.size() > 0) {
            for (auto swapchain_image : swapchain_data->images) {
                // Erase every tracked layout for every subresource of this image,
                // then drop the image's subresource list itself.
                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
                if (image_sub != dev_data->imageSubresourceMap.end()) {
                    for (auto imgsubpair : image_sub->second) {
                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
                        if (image_item != dev_data->imageLayoutMap.end()) {
                            dev_data->imageLayoutMap.erase(image_item);
                        }
                    }
                    dev_data->imageSubresourceMap.erase(image_sub);
                }
                // NOTE(review): the image handle is paired with kVulkanObjectTypeSwapchainKHR
                // rather than kVulkanObjectTypeImage here — presumably deliberate because these
                // images are swapchain-owned; confirm against ClearMemoryObjectBindings' contract.
                ClearMemoryObjectBindings(dev_data, HandleToUint64(swapchain_image), kVulkanObjectTypeSwapchainKHR);
                EraseQFOImageRelaseBarriers(dev_data, swapchain_image);
                dev_data->imageMap.erase(swapchain_image);
            }
        }
        // Clear the surface's pointers to this swapchain so a subsequent create
        // on the same surface validates against the correct state.
        auto surface_state = GetSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
        if (surface_state) {
            if (surface_state->swapchain == swapchain_data) surface_state->swapchain = nullptr;
            if (surface_state->old_swapchain == swapchain_data) surface_state->old_swapchain = nullptr;
        }

        dev_data->swapchainMap.erase(swapchain);
    }
}
// Intercept vkDestroySwapchainKHR: scrub layer state for the swapchain before
// forwarding the destroy to the driver.
VKAPI_ATTR void VKAPI_CALL DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    {
        // Pre-record to avoid Destroy/Create race
        lock_guard_t lock(global_lock);
        PreCallRecordDestroySwapchainKHR(device_data, swapchain);
    }
    device_data->dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator);
}
// Validate vkGetSwapchainImagesKHR's two-call idiom: if the app passes a
// non-NULL pSwapchainImages, it must have made a prior count-query call, and
// the requested count must not exceed the previously returned count.
static bool PreCallValidateGetSwapchainImagesKHR(layer_data *device_data, SWAPCHAIN_NODE *swapchain_state, VkDevice device,
                                                 uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages) {
    if (!swapchain_state || !pSwapchainImages) return false;

    lock_guard_t lock(global_lock);
    bool skip = false;
    // Compare the preliminary value of *pSwapchainImageCount with the value this time:
    if (swapchain_state->vkGetSwapchainImagesKHRState == UNCALLED) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(device), kVUID_Core_Swapchain_PriorCount,
                        "vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImageCount; but no prior positive value has "
                        "been seen for pSwapchainImages.");
    } else if (*pSwapchainImageCount > swapchain_state->get_swapchain_image_count) {
        skip |=
            log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                    HandleToUint64(device), kVUID_Core_Swapchain_InvalidCount,
                    "vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImageCount, and with pSwapchainImages set to a "
                    "value (%d) that is greater than the value (%d) that was returned when pSwapchainImageCount was NULL.",
                    *pSwapchainImageCount, swapchain_state->get_swapchain_image_count);
    }
    return skip;
}
// Record the results of vkGetSwapchainImagesKHR: create IMAGE_STATE entries
// mirroring the swapchain's create info for each newly seen presentable image,
// seed their layout tracking as UNDEFINED, and advance the swapchain's
// query-progress state (QUERY_COUNT after a count query, QUERY_DETAILS once
// image handles have been retrieved) plus the cached image count.
static void PostCallRecordGetSwapchainImagesKHR(layer_data *device_data, SWAPCHAIN_NODE *swapchain_state, VkDevice device,
                                                uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages) {
    lock_guard_t lock(global_lock);
    // Grow (never shrink) the tracked image list to the reported count.
    if (*pSwapchainImageCount > swapchain_state->images.size()) swapchain_state->images.resize(*pSwapchainImageCount);

    if (pSwapchainImages) {
        if (swapchain_state->vkGetSwapchainImagesKHRState < QUERY_DETAILS) {
            swapchain_state->vkGetSwapchainImagesKHRState = QUERY_DETAILS;
        }
        for (uint32_t i = 0; i < *pSwapchainImageCount; ++i) {
            if (swapchain_state->images[i] != VK_NULL_HANDLE) continue;  // Already retrieved this.

            // New presentable image: its layout starts UNDEFINED per the spec.
            IMAGE_LAYOUT_NODE image_layout_node;
            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
            image_layout_node.format = swapchain_state->createInfo.imageFormat;
            // Add imageMap entries for each swapchain image.  Synthesize a
            // VkImageCreateInfo from the swapchain's create info so the image can be
            // validated like an ordinary 2D, single-mip image.
            VkImageCreateInfo image_ci = {};
            image_ci.flags = 0;
            image_ci.imageType = VK_IMAGE_TYPE_2D;
            image_ci.format = swapchain_state->createInfo.imageFormat;
            image_ci.extent.width = swapchain_state->createInfo.imageExtent.width;
            image_ci.extent.height = swapchain_state->createInfo.imageExtent.height;
            image_ci.extent.depth = 1;
            image_ci.mipLevels = 1;
            image_ci.arrayLayers = swapchain_state->createInfo.imageArrayLayers;
            image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
            image_ci.tiling = VK_IMAGE_TILING_OPTIMAL;
            image_ci.usage = swapchain_state->createInfo.imageUsage;
            image_ci.sharingMode = swapchain_state->createInfo.imageSharingMode;
            device_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_STATE>(new IMAGE_STATE(pSwapchainImages[i], &image_ci));
            auto &image_state = device_data->imageMap[pSwapchainImages[i]];
            image_state->valid = false;  // contents undefined until first write/acquire
            image_state->binding.mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;  // sentinel: memory owned by the swapchain
            swapchain_state->images[i] = pSwapchainImages[i];
            // Seed layout tracking for the image's base (non-subresource) entry.
            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
            device_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
            device_data->imageLayoutMap[subpair] = image_layout_node;
        }
    }

    if (*pSwapchainImageCount) {
        if (swapchain_state->vkGetSwapchainImagesKHRState < QUERY_COUNT) {
            swapchain_state->vkGetSwapchainImagesKHRState = QUERY_COUNT;
        }
        swapchain_state->get_swapchain_image_count = *pSwapchainImageCount;
    }
}
// Intercept vkGetSwapchainImagesKHR: validate the two-call idiom, dispatch, and
// record the returned images (both VK_SUCCESS and VK_INCOMPLETE carry data).
VKAPI_ATTR VkResult VKAPI_CALL GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
                                                     VkImage *pSwapchainImages) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    auto swapchain_state = GetSwapchainNode(device_data, swapchain);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    const bool skip =
        PreCallValidateGetSwapchainImagesKHR(device_data, swapchain_state, device, pSwapchainImageCount, pSwapchainImages);
    if (!skip) {
        result = device_data->dispatch_table.GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);
    }
    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
        PostCallRecordGetSwapchainImagesKHR(device_data, swapchain_state, device, pSwapchainImageCount, pSwapchainImages);
    }
    return result;
}
// Validate vkQueuePresentKHR before dispatch: checks wait-semaphore signal state, per-swapchain image
// index / acquisition / layout rules, surface presentation support for the presenting queue family, and
// the optional VkPresentRegionsKHR / VkPresentTimesInfoGOOGLE structs on the pNext chain.
// Returns true if validation failed and the call should be skipped. Holds global_lock for its duration.
static bool PreCallValidateQueuePresentKHR(layer_data *dev_data, VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
    bool skip = false;
    lock_guard_t lock(global_lock);
    auto queue_state = GetQueueState(dev_data, queue);
    // Every wait semaphore must have a pending signal; otherwise the queue can never make forward progress.
    for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
        auto pSemaphore = GetSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
        if (pSemaphore && !pSemaphore->signaled) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                            kVUID_Core_DrawState_QueueForwardProgress,
                            "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
                            HandleToUint64(queue), HandleToUint64(pPresentInfo->pWaitSemaphores[i]));
        }
    }
    // Per-swapchain checks: image index range, image acquired state, and presentable layout.
    for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
        auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
        if (swapchain_data) {
            if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                            HandleToUint64(pPresentInfo->pSwapchains[i]), kVUID_Core_DrawState_SwapchainInvalidImage,
                            "vkQueuePresentKHR: Swapchain image index too large (%u). There are only %u images in this swapchain.",
                            pPresentInfo->pImageIndices[i], (uint32_t)swapchain_data->images.size());
            } else {
                auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
                auto image_state = GetImageState(dev_data, image);
                // Shared-presentable images stay owned by the presentation engine; lock their layout tracking.
                if (image_state->shared_presentable) {
                    image_state->layout_locked = true;
                }
                if (!image_state->acquired) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                        HandleToUint64(pPresentInfo->pSwapchains[i]), kVUID_Core_DrawState_SwapchainImageNotAcquired,
                        "vkQueuePresentKHR: Swapchain image index %u has not been acquired.", pPresentInfo->pImageIndices[i]);
                }
                // All tracked layouts of the presented image must be PRESENT_SRC (or SHARED_PRESENT when the extension is on).
                vector<VkImageLayout> layouts;
                if (FindLayouts(dev_data, image, layouts)) {
                    for (auto layout : layouts) {
                        if ((layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) && (!dev_data->extensions.vk_khr_shared_presentable_image ||
                                                                            (layout != VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR))) {
                            skip |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
                                        HandleToUint64(queue), "VUID-VkPresentInfoKHR-pImageIndices-01296",
                                        "Images passed to present must be in layout VK_IMAGE_LAYOUT_PRESENT_SRC_KHR or "
                                        "VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR but is in %s.",
                                        string_VkImageLayout(layout));
                        }
                    }
                }
            }
            // All physical devices and queue families are required to be able to present to any native window on Android; require
            // the application to have established support on any other platform.
            if (!dev_data->instance_data->extensions.vk_khr_android_surface) {
                auto surface_state = GetSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
                auto support_it = surface_state->gpu_queue_support.find({dev_data->physical_device, queue_state->queueFamilyIndex});
                if (support_it == surface_state->gpu_queue_support.end()) {
                    skip |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                HandleToUint64(pPresentInfo->pSwapchains[i]), kVUID_Core_DrawState_SwapchainUnsupportedQueue,
                                "vkQueuePresentKHR: Presenting image without calling vkGetPhysicalDeviceSurfaceSupportKHR");
                } else if (!support_it->second) {
                    skip |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                HandleToUint64(pPresentInfo->pSwapchains[i]), "VUID-vkQueuePresentKHR-pSwapchains-01292",
                                "vkQueuePresentKHR: Presenting image on queue that cannot present to this surface.");
                }
            }
        }
    }
    // Validate extension structs chained off pPresentInfo->pNext, if any.
    if (pPresentInfo && pPresentInfo->pNext) {
        // Verify ext struct
        const auto *present_regions = lvl_find_in_chain<VkPresentRegionsKHR>(pPresentInfo->pNext);
        if (present_regions) {
            // Each present-region rectangle must fit inside its swapchain's image extent and layer count.
            for (uint32_t i = 0; i < present_regions->swapchainCount; ++i) {
                auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
                assert(swapchain_data);
                VkPresentRegionKHR region = present_regions->pRegions[i];
                for (uint32_t j = 0; j < region.rectangleCount; ++j) {
                    VkRectLayerKHR rect = region.pRectangles[j];
                    if ((rect.offset.x + rect.extent.width) > swapchain_data->createInfo.imageExtent.width) {
                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pPresentInfo->pSwapchains[i]),
                                        "VUID-VkRectLayerKHR-offset-01261",
                                        "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, "
                                        "pRegion[%i].pRectangles[%i], the sum of offset.x (%i) and extent.width (%i) is greater "
                                        "than the corresponding swapchain's imageExtent.width (%i).",
                                        i, j, rect.offset.x, rect.extent.width, swapchain_data->createInfo.imageExtent.width);
                    }
                    if ((rect.offset.y + rect.extent.height) > swapchain_data->createInfo.imageExtent.height) {
                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pPresentInfo->pSwapchains[i]),
                                        "VUID-VkRectLayerKHR-offset-01261",
                                        "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, "
                                        "pRegion[%i].pRectangles[%i], the sum of offset.y (%i) and extent.height (%i) is greater "
                                        "than the corresponding swapchain's imageExtent.height (%i).",
                                        i, j, rect.offset.y, rect.extent.height, swapchain_data->createInfo.imageExtent.height);
                    }
                    if (rect.layer > swapchain_data->createInfo.imageArrayLayers) {
                        skip |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                            HandleToUint64(pPresentInfo->pSwapchains[i]), "VUID-VkRectLayerKHR-layer-01262",
                            "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, pRegion[%i].pRectangles[%i], the layer "
                            "(%i) is greater than the corresponding swapchain's imageArrayLayers (%i).",
                            i, j, rect.layer, swapchain_data->createInfo.imageArrayLayers);
                    }
                }
            }
        }
        // VkPresentTimesInfoGOOGLE must cover exactly the same number of swapchains as the present info.
        const auto *present_times_info = lvl_find_in_chain<VkPresentTimesInfoGOOGLE>(pPresentInfo->pNext);
        if (present_times_info) {
            if (pPresentInfo->swapchainCount != present_times_info->swapchainCount) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                            HandleToUint64(pPresentInfo->pSwapchains[0]), "VUID-VkPresentTimesInfoGOOGLE-swapchainCount-01247",
                            "vkQueuePresentKHR(): VkPresentTimesInfoGOOGLE.swapchainCount is %i but pPresentInfo->swapchainCount "
                            "is %i. For VkPresentTimesInfoGOOGLE down pNext chain of VkPresentInfoKHR, "
                            "VkPresentTimesInfoGOOGLE.swapchainCount must equal VkPresentInfoKHR.swapchainCount.",
                            present_times_info->swapchainCount, pPresentInfo->swapchainCount);
            }
        }
    }
    return skip;
}
// Update tracked state after a present: wait semaphores are consumed, and successfully presented
// images are handed back to the WSI (no longer "acquired").
static void PostCallRecordQueuePresentKHR(layer_data *dev_data, const VkPresentInfoKHR *pPresentInfo, const VkResult &result) {
    // Semaphore waits occur before error generation, if the call reached the ICD. (Confirm?)
    for (uint32_t sem_idx = 0; sem_idx < pPresentInfo->waitSemaphoreCount; ++sem_idx) {
        auto sem_node = GetSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[sem_idx]);
        if (!sem_node) continue;
        sem_node->signaler.first = VK_NULL_HANDLE;
        sem_node->signaled = false;
    }
    for (uint32_t sc_idx = 0; sc_idx < pPresentInfo->swapchainCount; ++sc_idx) {
        // Note: this is imperfect, in that we can get confused about what did or didn't succeed-- but if the app does that, it's
        // confused itself just as much.
        VkResult local_result = (pPresentInfo->pResults != nullptr) ? pPresentInfo->pResults[sc_idx] : result;
        const bool presented = (local_result == VK_SUCCESS) || (local_result == VK_SUBOPTIMAL_KHR);
        if (!presented) continue;  // this present didn't actually happen.
        // Mark the image as having been released to the WSI
        auto sc_node = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[sc_idx]);
        auto img_state = GetImageState(dev_data, sc_node->images[pPresentInfo->pImageIndices[sc_idx]]);
        img_state->acquired = false;
    }
    // Note: even though presentation is directed to a queue, there is no direct ordering between QP and subsequent work, so QP (and
    // its semaphore waits) /never/ participate in any completion proof.
}
// vkQueuePresentKHR entry point: validate, dispatch down the chain, then record state on success.
VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    if (PreCallValidateQueuePresentKHR(dev_data, queue, pPresentInfo)) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    const VkResult result = dev_data->dispatch_table.QueuePresentKHR(queue, pPresentInfo);
    if (VK_ERROR_VALIDATION_FAILED_EXT != result) {
        PostCallRecordQueuePresentKHR(dev_data, pPresentInfo, result);
    }
    return result;
}
// Validate each swapchain in a vkCreateSharedSwapchainsKHR batch, filling the caller's surface/old-swapchain
// state vectors as it goes. Returns true (skip) at the first swapchain that fails validation.
static bool PreCallValidateCreateSharedSwapchainsKHR(layer_data *dev_data, uint32_t swapchainCount,
                                                     const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
                                                     std::vector<SURFACE_STATE *> &surface_state,
                                                     std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
    if (!pCreateInfos) return false;
    lock_guard_t lock(global_lock);
    for (uint32_t idx = 0; idx < swapchainCount; idx++) {
        surface_state.push_back(GetSurfaceState(dev_data->instance_data, pCreateInfos[idx].surface));
        old_swapchain_state.push_back(GetSwapchainNode(dev_data, pCreateInfos[idx].oldSwapchain));
        std::stringstream func_name;
        func_name << "vkCreateSharedSwapchainsKHR[" << swapchainCount << "]";
        const bool failed = PreCallValidateCreateSwapchainKHR(dev_data, func_name.str().c_str(), &pCreateInfos[idx],
                                                              surface_state[idx], old_swapchain_state[idx]);
        if (failed) return true;
    }
    return false;
}
// Record state after vkCreateSharedSwapchainsKHR: on success, create and register a SWAPCHAIN_NODE per
// swapchain; on failure, clear the surfaces' swapchain pointers. Either way the old swapchains are retired.
static void PostCallRecordCreateSharedSwapchainsKHR(layer_data *dev_data, VkResult result, uint32_t swapchainCount,
                                                    const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
                                                    std::vector<SURFACE_STATE *> &surface_state,
                                                    std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
    for (uint32_t idx = 0; idx < swapchainCount; idx++) {
        if (VK_SUCCESS == result) {
            auto sc_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(&pCreateInfos[idx], pSwapchains[idx]));
            const auto mode = pCreateInfos[idx].presentMode;
            if ((VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == mode) ||
                (VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == mode)) {
                sc_state->shared_presentable = true;
            }
            surface_state[idx]->swapchain = sc_state.get();
            dev_data->swapchainMap[pSwapchains[idx]] = std::move(sc_state);
        } else {
            surface_state[idx]->swapchain = nullptr;
        }
        // Spec requires that even if CreateSharedSwapchainKHR fails, oldSwapchain behaves as replaced.
        if (old_swapchain_state[idx]) {
            old_swapchain_state[idx]->replaced = true;
        }
        surface_state[idx]->old_swapchain = old_swapchain_state[idx];
    }
}
// vkCreateSharedSwapchainsKHR entry point: validate the whole batch, dispatch, then record results.
VKAPI_ATTR VkResult VKAPI_CALL CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
                                                         const VkSwapchainCreateInfoKHR *pCreateInfos,
                                                         const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    std::vector<SURFACE_STATE *> surface_state;
    std::vector<SWAPCHAIN_NODE *> old_swapchain_state;
    const bool skip = PreCallValidateCreateSharedSwapchainsKHR(dev_data, swapchainCount, pCreateInfos, pSwapchains, surface_state,
                                                               old_swapchain_state);
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result =
        dev_data->dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains);
    PostCallRecordCreateSharedSwapchainsKHR(dev_data, result, swapchainCount, pCreateInfos, pSwapchains, surface_state,
                                            old_swapchain_state);
    return result;
}
// Validate vkAcquireNextImageKHR: a fence or semaphore must be provided, the semaphore must not already
// be signaled, the fence must be submittable, and the swapchain must not be retired or image-less.
// Returns true if validation failed and the call should be skipped.
static bool PreCallValidateAcquireNextImageKHR(layer_data *dev_data, VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
                                               VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
    bool skip = false;
    if (fence == VK_NULL_HANDLE && semaphore == VK_NULL_HANDLE) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(device), "VUID-vkAcquireNextImageKHR-semaphore-01780",
                        "vkAcquireNextImageKHR: Semaphore and fence cannot both be VK_NULL_HANDLE. There would be no way to "
                        "determine the completion of this operation.");
    }
    auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
    if (pSemaphore && pSemaphore->scope == kSyncScopeInternal && pSemaphore->signaled) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                        HandleToUint64(semaphore), "VUID-vkAcquireNextImageKHR-semaphore-01286",
                        "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state.");
    }
    auto pFence = GetFenceNode(dev_data, fence);
    if (pFence) {
        skip |= ValidateFenceForSubmit(dev_data, pFence);
    }
    auto swapchain_data = GetSwapchainNode(dev_data, swapchain);
    // Fix: GetSwapchainNode returns null for an unknown/destroyed handle; the previous code dereferenced
    // it unconditionally and crashed the layer. Guard all swapchain-state checks (matches the null checks
    // used elsewhere in this file, e.g. PreCallValidateQueuePresentKHR).
    if (swapchain_data) {
        if (swapchain_data->replaced) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                            HandleToUint64(swapchain), "VUID-vkAcquireNextImageKHR-swapchain-01285",
                            "vkAcquireNextImageKHR: This swapchain has been retired. The application can still present any images it "
                            "has acquired, but cannot acquire any more.");
        }
        auto physical_device_state = GetPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
        if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState != UNCALLED) {
            // Count images the app currently holds; it may only hold (imageCount - minImageCount) at once.
            uint64_t acquired_images = std::count_if(swapchain_data->images.begin(), swapchain_data->images.end(),
                                                     [=](VkImage image) { return GetImageState(dev_data, image)->acquired; });
            if (acquired_images > swapchain_data->images.size() - physical_device_state->surfaceCapabilities.minImageCount) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                            HandleToUint64(swapchain), kVUID_Core_DrawState_SwapchainTooManyImages,
                            "vkAcquireNextImageKHR: Application has already acquired the maximum number of images (0x%" PRIxLEAST64 ")",
                            acquired_images);
            }
        }
        if (swapchain_data->images.size() == 0) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                            HandleToUint64(swapchain), kVUID_Core_DrawState_SwapchainImagesNotFound,
                            "vkAcquireNextImageKHR: No images found to acquire from. Application probably did not call "
                            "vkGetSwapchainImagesKHR after swapchain creation.");
        }
    }
    return skip;
}
// Record state after a successful vkAcquireNextImageKHR: mark the fence in-flight, the semaphore
// signaled, and the acquired image as owned by the application.
static void PostCallRecordAcquireNextImageKHR(layer_data *dev_data, VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
                                              VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
    auto fence_node = GetFenceNode(dev_data, fence);
    if (fence_node && kSyncScopeInternal == fence_node->scope) {
        // Treat as inflight since it is valid to wait on this fence, even in cases where it is technically a temporary
        // import
        fence_node->state = FENCE_INFLIGHT;
        fence_node->signaler.first = VK_NULL_HANDLE;  // ANI isn't on a queue, so this can't participate in a completion proof.
    }
    auto sem_node = GetSemaphoreNode(dev_data, semaphore);
    if (sem_node && kSyncScopeInternal == sem_node->scope) {
        // Treat as signaled since it is valid to wait on this semaphore, even in cases where it is technically a
        // temporary import
        sem_node->signaled = true;
        sem_node->signaler.first = VK_NULL_HANDLE;
    }
    // Mark the image as acquired.
    auto sc_node = GetSwapchainNode(dev_data, swapchain);
    auto img_state = GetImageState(dev_data, sc_node->images[*pImageIndex]);
    img_state->acquired = true;
    img_state->shared_presentable = sc_node->shared_presentable;
}
// Validate vkAcquireNextImage2KHR (same rules as the 1-arg variant, taking params from pAcquireInfo).
// Returns true if validation failed and the call should be skipped.
static bool PreCallValidateAcquireNextImage2KHR(layer_data *dev_data, VkDevice device,
                                                const VkAcquireNextImageInfoKHR *pAcquireInfo, uint32_t *pImageIndex) {
    // TODO: unify as appropriate with PreCallValidateAcquireNextImage2KHR; get
    // new VUIDs assigned for AcquireNextImage2KHR-specific cases
    bool skip = false;
    if (pAcquireInfo->fence == VK_NULL_HANDLE && pAcquireInfo->semaphore == VK_NULL_HANDLE) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(device), "VUID-vkAcquireNextImageKHR-semaphore-01780",
                        "vkAcquireNextImage2KHR: Semaphore and fence cannot both be VK_NULL_HANDLE. There would be no way to "
                        "determine the completion of this operation.");
    }
    auto pSemaphore = GetSemaphoreNode(dev_data, pAcquireInfo->semaphore);
    if (pSemaphore && pSemaphore->scope == kSyncScopeInternal && pSemaphore->signaled) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                        HandleToUint64(pAcquireInfo->semaphore), "VUID-vkAcquireNextImageKHR-semaphore-01286",
                        "vkAcquireNextImage2KHR: Semaphore must not be currently signaled or in a wait state.");
    }
    auto pFence = GetFenceNode(dev_data, pAcquireInfo->fence);
    if (pFence) {
        skip |= ValidateFenceForSubmit(dev_data, pFence);
    }
    auto swapchain_data = GetSwapchainNode(dev_data, pAcquireInfo->swapchain);
    // Fix: guard against a null swapchain node (unknown/destroyed handle) before dereferencing it;
    // mirrors the same fix in PreCallValidateAcquireNextImageKHR.
    if (swapchain_data) {
        if (swapchain_data->replaced) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                            HandleToUint64(pAcquireInfo->swapchain), "VUID-vkAcquireNextImageKHR-swapchain-01285",
                            "vkAcquireNextImage2KHR: This swapchain has been retired. The application can still present any images it "
                            "has acquired, but cannot acquire any more.");
        }
        auto physical_device_state = GetPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
        if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState != UNCALLED) {
            // Count images the app currently holds; it may only hold (imageCount - minImageCount) at once.
            uint64_t acquired_images = std::count_if(swapchain_data->images.begin(), swapchain_data->images.end(),
                                                     [=](VkImage image) { return GetImageState(dev_data, image)->acquired; });
            if (acquired_images > swapchain_data->images.size() - physical_device_state->surfaceCapabilities.minImageCount) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                HandleToUint64(pAcquireInfo->swapchain), kVUID_Core_DrawState_SwapchainTooManyImages,
                                "vkAcquireNextImage2KHR: Application has already acquired the maximum number of images (0x%" PRIxLEAST64
                                ")",
                                acquired_images);
            }
        }
        if (swapchain_data->images.size() == 0) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                            HandleToUint64(pAcquireInfo->swapchain), kVUID_Core_DrawState_SwapchainImagesNotFound,
                            "vkAcquireNextImage2KHR: No images found to acquire from. Application probably did not call "
                            "vkGetSwapchainImagesKHR after swapchain creation.");
        }
    }
    return skip;
}
// Record state after a successful vkAcquireNextImage2KHR (2KHR variant of PostCallRecordAcquireNextImageKHR).
static void PostCallRecordAcquireNextImage2KHR(layer_data *dev_data, VkDevice device, const VkAcquireNextImageInfoKHR *pAcquireInfo,
                                               uint32_t *pImageIndex) {
    auto fence_node = GetFenceNode(dev_data, pAcquireInfo->fence);
    if (fence_node && kSyncScopeInternal == fence_node->scope) {
        // Treat as inflight since it is valid to wait on this fence, even in cases where it is technically a temporary
        // import
        fence_node->state = FENCE_INFLIGHT;
        fence_node->signaler.first = VK_NULL_HANDLE;  // ANI isn't on a queue, so this can't participate in a completion proof.
    }
    auto sem_node = GetSemaphoreNode(dev_data, pAcquireInfo->semaphore);
    if (sem_node && kSyncScopeInternal == sem_node->scope) {
        // Treat as signaled since it is valid to wait on this semaphore, even in cases where it is technically a
        // temporary import
        sem_node->signaled = true;
        sem_node->signaler.first = VK_NULL_HANDLE;
    }
    // Mark the image as acquired.
    auto sc_node = GetSwapchainNode(dev_data, pAcquireInfo->swapchain);
    auto img_state = GetImageState(dev_data, sc_node->images[*pImageIndex]);
    img_state->acquired = true;
    img_state->shared_presentable = sc_node->shared_presentable;
    // TODO: unify as appropriate with PostCallRecordAcquireNextImageKHR;
    // consider physical device masks
}
// vkAcquireNextImageKHR entry point: validate under the lock, dispatch unlocked, record on success.
VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
                                                   VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    {
        lock_guard_t lock(global_lock);
        skip = PreCallValidateAcquireNextImageKHR(dev_data, device, swapchain, timeout, semaphore, fence, pImageIndex);
    }
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
    if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
        lock_guard_t lock(global_lock);
        PostCallRecordAcquireNextImageKHR(dev_data, device, swapchain, timeout, semaphore, fence, pImageIndex);
    }
    return result;
}
// vkAcquireNextImage2KHR entry point: validate under the lock, dispatch unlocked, record on success.
VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImage2KHR(VkDevice device, const VkAcquireNextImageInfoKHR *pAcquireInfo,
                                                    uint32_t *pImageIndex) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    {
        lock_guard_t lock(global_lock);
        skip = PreCallValidateAcquireNextImage2KHR(dev_data, device, pAcquireInfo, pImageIndex);
    }
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.AcquireNextImage2KHR(device, pAcquireInfo, pImageIndex);
    if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
        lock_guard_t lock(global_lock);
        PostCallRecordAcquireNextImage2KHR(dev_data, device, pAcquireInfo, pImageIndex);
    }
    return result;
}
// Warn about call-sequence problems on vkEnumeratePhysicalDevices with non-NULL pPhysicalDevices:
// either the count was never queried, or the supplied count differs from the queried one.
static bool PreCallValidateEnumeratePhysicalDevices(instance_layer_data *instance_data, uint32_t *pPhysicalDeviceCount) {
    if (UNCALLED == instance_data->vkEnumeratePhysicalDevicesState) {
        // Flag warning here. You can call this without having queried the count, but it may not be
        // robust on platforms with multiple physical devices.
        return log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0,
                       kVUID_Core_DevLimit_MissingQueryCount,
                       "Call sequence has vkEnumeratePhysicalDevices() w/ non-NULL pPhysicalDevices. You should first call "
                       "vkEnumeratePhysicalDevices() w/ NULL pPhysicalDevices to query pPhysicalDeviceCount.");
    }
    // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
    if (instance_data->physical_devices_count != *pPhysicalDeviceCount) {
        // Having actual count match count from app is not a requirement, so this can be a warning
        return log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                       VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, kVUID_Core_DevLimit_CountMismatch,
                       "Call to vkEnumeratePhysicalDevices() w/ pPhysicalDeviceCount value %u, but actual count supported by "
                       "this instance is %u.",
                       *pPhysicalDeviceCount, instance_data->physical_devices_count);
    }
    return false;
}
// Advance call-sequence tracking: record that vkEnumeratePhysicalDevices has been called at least once
// (QUERY_COUNT). Called for both count-only and array-filling invocations.
static void PreCallRecordEnumeratePhysicalDevices(instance_layer_data *instance_data) {
    instance_data->vkEnumeratePhysicalDevicesState = QUERY_COUNT;
}
// Record results of vkEnumeratePhysicalDevices: cache the reported count on a count-only query,
// otherwise (on success) register each returned physical device and snapshot its features.
static void PostCallRecordEnumeratePhysicalDevices(instance_layer_data *instance_data, const VkResult &result,
                                                   uint32_t *pPhysicalDeviceCount, VkPhysicalDevice *pPhysicalDevices) {
    if (nullptr == pPhysicalDevices) {
        instance_data->physical_devices_count = *pPhysicalDeviceCount;
        return;
    }
    if (result != VK_SUCCESS) return;
    // Save physical devices
    for (uint32_t idx = 0; idx < *pPhysicalDeviceCount; idx++) {
        auto &pd_state = instance_data->physical_device_map[pPhysicalDevices[idx]];
        pd_state.phys_device = pPhysicalDevices[idx];
        // Init actual features for each physical device
        instance_data->dispatch_table.GetPhysicalDeviceFeatures(pPhysicalDevices[idx], &pd_state.features2.features);
    }
}
// vkEnumeratePhysicalDevices entry point: validate/record sequencing under the lock, dispatch unlocked,
// then record results under the lock again.
VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
                                                        VkPhysicalDevice *pPhysicalDevices) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    assert(instance_data);
    bool skip = false;
    {
        lock_guard_t lock(global_lock);
        // For this instance, flag when vkEnumeratePhysicalDevices goes to QUERY_COUNT and then QUERY_DETAILS
        if (pPhysicalDevices) {
            skip = PreCallValidateEnumeratePhysicalDevices(instance_data, pPhysicalDeviceCount);
        }
        PreCallRecordEnumeratePhysicalDevices(instance_data);
    }
    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    VkResult result = instance_data->dispatch_table.EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
    lock_guard_t lock(global_lock);
    PostCallRecordEnumeratePhysicalDevices(instance_data, result, pPhysicalDeviceCount, pPhysicalDevices);
    return result;
}
// Common function to handle validation for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
// Shared validation for the GetPhysicalDeviceQueueFamilyProperties family: when a non-NULL properties
// pointer is passed, warn if the count was never queried or mismatches the previously returned count,
// and advance this physical device's call-sequence state to QUERY_DETAILS.
static bool ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_layer_data *instance_data,
                                                                 PHYSICAL_DEVICE_STATE *pd_state,
                                                                 uint32_t requested_queue_family_property_count, bool qfp_null,
                                                                 const char *caller_name) {
    if (qfp_null) return false;  // count-only query: nothing to validate, state unchanged
    bool skip = false;
    // Verify that for each physical device, this command is called first with NULL pQueueFamilyProperties in order to get count
    if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
        skip = log_msg(
            instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
            HandleToUint64(pd_state->phys_device), kVUID_Core_DevLimit_MissingQueryCount,
            "%s is called with non-NULL pQueueFamilyProperties before obtaining pQueueFamilyPropertyCount. It is recommended "
            "to first call %s with NULL pQueueFamilyProperties in order to obtain the maximal pQueueFamilyPropertyCount.",
            caller_name, caller_name);
        // Then verify that pCount that is passed in on second call matches what was returned
    } else if (pd_state->queue_family_count != requested_queue_family_property_count) {
        skip = log_msg(
            instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
            HandleToUint64(pd_state->phys_device), kVUID_Core_DevLimit_CountMismatch,
            "%s is called with non-NULL pQueueFamilyProperties and pQueueFamilyPropertyCount value %" PRIu32
            ", but the largest previously returned pQueueFamilyPropertyCount for this physicalDevice is %" PRIu32
            ". It is recommended to instead receive all the properties by calling %s with pQueueFamilyPropertyCount that was "
            "previously obtained by calling %s with NULL pQueueFamilyProperties.",
            caller_name, requested_queue_family_property_count, pd_state->queue_family_count, caller_name, caller_name);
    }
    pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
    return skip;
}
// Validate the legacy (non-2KHR) queue-family query by delegating to the common helper.
static bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties(instance_layer_data *instance_data,
                                                                  PHYSICAL_DEVICE_STATE *pd_state,
                                                                  uint32_t *pQueueFamilyPropertyCount,
                                                                  VkQueueFamilyProperties *pQueueFamilyProperties) {
    const bool qfp_null = (nullptr == pQueueFamilyProperties);
    return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, pd_state, *pQueueFamilyPropertyCount, qfp_null,
                                                                "vkGetPhysicalDeviceQueueFamilyProperties()");
}
// Validate the 2/2KHR queue-family query by delegating to the common helper.
static bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties2(instance_layer_data *instance_data,
                                                                   PHYSICAL_DEVICE_STATE *pd_state,
                                                                   uint32_t *pQueueFamilyPropertyCount,
                                                                   VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
    const bool qfp_null = (nullptr == pQueueFamilyProperties);
    return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, pd_state, *pQueueFamilyPropertyCount, qfp_null,
                                                                "vkGetPhysicalDeviceQueueFamilyProperties2[KHR]()");
}
// Common function to update state for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
// Shared state update for the queue-family queries: a count-only call records the count (and moves
// UNCALLED -> QUERY_COUNT); a properties call records QUERY_DETAILS and caches the properties.
static void StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
                                                                    VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
    if (nullptr == pQueueFamilyProperties) {
        if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
            pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT;
        }
        pd_state->queue_family_count = count;
        return;
    }
    // Save queue family properties
    pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
    // Only grow the tracked count/storage; a smaller partial query must not shrink previously saved data.
    if (count > pd_state->queue_family_count) pd_state->queue_family_count = count;
    if (pd_state->queue_family_properties.size() < count) pd_state->queue_family_properties.resize(count);
    for (uint32_t qfi = 0; qfi < count; ++qfi) {
        pd_state->queue_family_properties[qfi] = pQueueFamilyProperties[qfi].queueFamilyProperties;
    }
}
// Adapt legacy VkQueueFamilyProperties results into the 2KHR struct form and forward to the common updater.
static void PostCallRecordGetPhysicalDeviceQueueFamilyProperties(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
                                                                 VkQueueFamilyProperties *pQueueFamilyProperties) {
    std::vector<VkQueueFamilyProperties2KHR> wrapped(count);
    VkQueueFamilyProperties2KHR *wrapped_ptr = nullptr;
    if (pQueueFamilyProperties) {
        for (uint32_t qfi = 0; qfi < count; ++qfi) {
            wrapped[qfi].sType = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR;
            wrapped[qfi].pNext = nullptr;
            wrapped[qfi].queueFamilyProperties = pQueueFamilyProperties[qfi];
        }
        wrapped_ptr = wrapped.data();
    }
    StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(pd_state, count, wrapped_ptr);
}
// 2/2KHR variant: properties already arrive in the common 2KHR form, so forward them unchanged.
static void PostCallRecordGetPhysicalDeviceQueueFamilyProperties2(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
                                                                  VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
    StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(pd_state, count, pQueueFamilyProperties);
}
// vkGetPhysicalDeviceQueueFamilyProperties entry point: validate under the lock, dispatch unlocked, record.
VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice,
                                                                  uint32_t *pQueueFamilyPropertyCount,
                                                                  VkQueueFamilyProperties *pQueueFamilyProperties) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
    assert(physical_device_state);
    bool skip = false;
    {
        lock_guard_t lock(global_lock);
        skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties(instance_data, physical_device_state,
                                                                     pQueueFamilyPropertyCount, pQueueFamilyProperties);
    }
    if (skip) return;
    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pQueueFamilyPropertyCount,
                                                                         pQueueFamilyProperties);
    lock_guard_t lock(global_lock);
    PostCallRecordGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pQueueFamilyPropertyCount, pQueueFamilyProperties);
}
// vkGetPhysicalDeviceQueueFamilyProperties2 entry point: validate under the lock, dispatch unlocked, record.
VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice,
                                                                   uint32_t *pQueueFamilyPropertyCount,
                                                                   VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
    assert(physical_device_state);
    bool skip = false;
    {
        lock_guard_t lock(global_lock);
        skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties2(instance_data, physical_device_state,
                                                                      pQueueFamilyPropertyCount, pQueueFamilyProperties);
    }
    if (skip) return;
    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties2(physicalDevice, pQueueFamilyPropertyCount,
                                                                          pQueueFamilyProperties);
    lock_guard_t lock(global_lock);
    PostCallRecordGetPhysicalDeviceQueueFamilyProperties2(physical_device_state, *pQueueFamilyPropertyCount,
                                                          pQueueFamilyProperties);
}
// vkGetPhysicalDeviceQueueFamilyProperties2KHR entry point: same flow as the core 2 variant, but
// dispatches the KHR-suffixed downchain function.
VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice,
                                                                      uint32_t *pQueueFamilyPropertyCount,
                                                                      VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
    assert(physical_device_state);
    bool skip = false;
    {
        lock_guard_t lock(global_lock);
        skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties2(instance_data, physical_device_state,
                                                                      pQueueFamilyPropertyCount, pQueueFamilyProperties);
    }
    if (skip) return;
    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties2KHR(physicalDevice, pQueueFamilyPropertyCount,
                                                                             pQueueFamilyProperties);
    lock_guard_t lock(global_lock);
    PostCallRecordGetPhysicalDeviceQueueFamilyProperties2(physical_device_state, *pQueueFamilyPropertyCount,
                                                          pQueueFamilyProperties);
}
// Generic surface-creation helper: dispatch the platform-specific create call down the chain and,
// on success, start tracking the new surface.
template <typename TCreateInfo, typename FPtr>
static VkResult CreateSurface(VkInstance instance, TCreateInfo const *pCreateInfo, VkAllocationCallbacks const *pAllocator,
                              VkSurfaceKHR *pSurface, FPtr fptr) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    // Call down the call chain:
    const VkResult result = (instance_data->dispatch_table.*fptr)(instance, pCreateInfo, pAllocator, pSurface);
    if (VK_SUCCESS == result) {
        lock_guard_t lock(global_lock);
        instance_data->surface_map[*pSurface] = SURFACE_STATE(*pSurface);
    }
    return result;
}
// Validate vkDestroySurfaceKHR: error if the surface still has an associated swapchain.
static bool PreCallValidateDestroySurfaceKHR(instance_layer_data *instance_data, VkInstance instance, VkSurfaceKHR surface) {
    auto surface_state = GetSurfaceState(instance_data, surface);
    if (!surface_state || !surface_state->swapchain) return false;
    return log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
                   HandleToUint64(instance), "VUID-vkDestroySurfaceKHR-surface-01266",
                   "vkDestroySurfaceKHR() called before its associated VkSwapchainKHR was destroyed.");
}
// Drop the surface's tracking entry. Done pre-call (see DestroySurfaceKHR) to avoid a Destroy/Create race.
static void PreCallRecordValidateDestroySurfaceKHR(instance_layer_data *instance_data, VkSurfaceKHR surface) {
    instance_data->surface_map.erase(surface);
}
// vkDestroySurfaceKHR entry point: validate and erase tracking under the lock, then dispatch unless skipped.
VKAPI_ATTR void VKAPI_CALL DestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    bool skip = false;
    {
        lock_guard_t lock(global_lock);
        skip = PreCallValidateDestroySurfaceKHR(instance_data, instance, surface);
        // Pre-record to avoid Destroy/Create race
        PreCallRecordValidateDestroySurfaceKHR(instance_data, surface);
    }
    if (skip) return;
    instance_data->dispatch_table.DestroySurfaceKHR(instance, surface, pAllocator);
}
// Platform surface creation entry points. Each simply forwards to the shared
// CreateSurface() template with the matching dispatch-table member, which handles
// down-chain dispatch and SURFACE_STATE registration.
VKAPI_ATTR VkResult VKAPI_CALL CreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR *pCreateInfo,
                                                            const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateDisplayPlaneSurfaceKHR);
}
#ifdef VK_USE_PLATFORM_ANDROID_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateAndroidSurfaceKHR(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR *pCreateInfo,
                                                       const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateAndroidSurfaceKHR);
}
#endif  // VK_USE_PLATFORM_ANDROID_KHR
#ifdef VK_USE_PLATFORM_IOS_MVK
VKAPI_ATTR VkResult VKAPI_CALL CreateIOSSurfaceMVK(VkInstance instance, const VkIOSSurfaceCreateInfoMVK *pCreateInfo,
                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateIOSSurfaceMVK);
}
#endif  // VK_USE_PLATFORM_IOS_MVK
#ifdef VK_USE_PLATFORM_MACOS_MVK
VKAPI_ATTR VkResult VKAPI_CALL CreateMacOSSurfaceMVK(VkInstance instance, const VkMacOSSurfaceCreateInfoMVK *pCreateInfo,
                                                     const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateMacOSSurfaceMVK);
}
#endif  // VK_USE_PLATFORM_MACOS_MVK
#ifdef VK_USE_PLATFORM_MIR_KHR
// Mir WSI: surface creation forwards to the shared CreateSurface() template.
VKAPI_ATTR VkResult VKAPI_CALL CreateMirSurfaceKHR(VkInstance instance, const VkMirSurfaceCreateInfoKHR *pCreateInfo,
                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateMirSurfaceKHR);
}
// Validate that queueFamilyIndex is within the family count previously queried
// for this physical device. Caller holds global_lock.
static bool PreCallValidateGetPhysicalDeviceMirPresentationSupportKHR(instance_layer_data *instance_data,
                                                                      VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex) {
    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
    return ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex,
                                             "VUID-vkGetPhysicalDeviceMirPresentationSupportKHR-queueFamilyIndex-01265",
                                             "vkGetPhysicalDeviceMirPresentationSupportKHR", "queueFamilyIndex");
}
// Entry point: validate under lock, then dispatch; returns VK_FALSE when skipped.
VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceMirPresentationSupportKHR(VkPhysicalDevice physicalDevice,
                                                                          uint32_t queueFamilyIndex, MirConnection *connection) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateGetPhysicalDeviceMirPresentationSupportKHR(instance_data, physicalDevice, queueFamilyIndex);
    lock.unlock();
    if (skip) return VK_FALSE;
    // Call down the call chain:
    VkBool32 result =
        instance_data->dispatch_table.GetPhysicalDeviceMirPresentationSupportKHR(physicalDevice, queueFamilyIndex, connection);
    return result;
}
#endif  // VK_USE_PLATFORM_MIR_KHR
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
// Wayland WSI: same create/validate/query pattern as Mir above.
VKAPI_ATTR VkResult VKAPI_CALL CreateWaylandSurfaceKHR(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
                                                       const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWaylandSurfaceKHR);
}
// Caller holds global_lock.
static bool PreCallValidateGetPhysicalDeviceWaylandPresentationSupportKHR(instance_layer_data *instance_data,
                                                                          VkPhysicalDevice physicalDevice,
                                                                          uint32_t queueFamilyIndex) {
    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
    return ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex,
                                             "VUID-vkGetPhysicalDeviceWaylandPresentationSupportKHR-queueFamilyIndex-01306",
                                             "vkGetPhysicalDeviceWaylandPresentationSupportKHR", "queueFamilyIndex");
}
VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice,
                                                                              uint32_t queueFamilyIndex,
                                                                              struct wl_display *display) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateGetPhysicalDeviceWaylandPresentationSupportKHR(instance_data, physicalDevice, queueFamilyIndex);
    lock.unlock();
    if (skip) return VK_FALSE;
    // Call down the call chain:
    VkBool32 result =
        instance_data->dispatch_table.GetPhysicalDeviceWaylandPresentationSupportKHR(physicalDevice, queueFamilyIndex, display);
    return result;
}
#endif  // VK_USE_PLATFORM_WAYLAND_KHR
#ifdef VK_USE_PLATFORM_WIN32_KHR
// Win32 WSI: create/validate/query pattern shared by all platform WSI backends.
VKAPI_ATTR VkResult VKAPI_CALL CreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo,
                                                     const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWin32SurfaceKHR);
}
// Validate queueFamilyIndex against the previously-queried family count.
// Caller holds global_lock.
static bool PreCallValidateGetPhysicalDeviceWin32PresentationSupportKHR(instance_layer_data *instance_data,
                                                                        VkPhysicalDevice physicalDevice,
                                                                        uint32_t queueFamilyIndex) {
    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
    return ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex,
                                             "VUID-vkGetPhysicalDeviceWin32PresentationSupportKHR-queueFamilyIndex-01309",
                                             "vkGetPhysicalDeviceWin32PresentationSupportKHR", "queueFamilyIndex");
}
VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice,
                                                                            uint32_t queueFamilyIndex) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateGetPhysicalDeviceWin32PresentationSupportKHR(instance_data, physicalDevice, queueFamilyIndex);
    lock.unlock();
    if (skip) return VK_FALSE;
    // Call down the call chain:
    VkBool32 result = instance_data->dispatch_table.GetPhysicalDeviceWin32PresentationSupportKHR(physicalDevice, queueFamilyIndex);
    return result;
}
#endif  // VK_USE_PLATFORM_WIN32_KHR
#ifdef VK_USE_PLATFORM_XCB_KHR
// XCB WSI: same pattern.
VKAPI_ATTR VkResult VKAPI_CALL CreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXcbSurfaceKHR);
}
// Caller holds global_lock.
static bool PreCallValidateGetPhysicalDeviceXcbPresentationSupportKHR(instance_layer_data *instance_data,
                                                                      VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex) {
    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
    return ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex,
                                             "VUID-vkGetPhysicalDeviceXcbPresentationSupportKHR-queueFamilyIndex-01312",
                                             "vkGetPhysicalDeviceXcbPresentationSupportKHR", "queueFamilyIndex");
}
VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice,
                                                                          uint32_t queueFamilyIndex, xcb_connection_t *connection,
                                                                          xcb_visualid_t visual_id) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateGetPhysicalDeviceXcbPresentationSupportKHR(instance_data, physicalDevice, queueFamilyIndex);
    lock.unlock();
    if (skip) return VK_FALSE;
    // Call down the call chain:
    VkBool32 result = instance_data->dispatch_table.GetPhysicalDeviceXcbPresentationSupportKHR(physicalDevice, queueFamilyIndex,
                                                                                               connection, visual_id);
    return result;
}
#endif  // VK_USE_PLATFORM_XCB_KHR
#ifdef VK_USE_PLATFORM_XLIB_KHR
// Xlib WSI: same create/validate/query pattern as the other platform backends.
VKAPI_ATTR VkResult VKAPI_CALL CreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
                                                    const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXlibSurfaceKHR);
}
// Validate queueFamilyIndex against the previously-queried family count.
// Caller holds global_lock.
static bool PreCallValidateGetPhysicalDeviceXlibPresentationSupportKHR(instance_layer_data *instance_data,
                                                                       VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex) {
    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
    return ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex,
                                             "VUID-vkGetPhysicalDeviceXlibPresentationSupportKHR-queueFamilyIndex-01315",
                                             "vkGetPhysicalDeviceXlibPresentationSupportKHR", "queueFamilyIndex");
}
VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
                                                                           uint32_t queueFamilyIndex, Display *dpy,
                                                                           VisualID visualID) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateGetPhysicalDeviceXlibPresentationSupportKHR(instance_data, physicalDevice, queueFamilyIndex);
    lock.unlock();
    if (skip) return VK_FALSE;
    // Call down the call chain:
    VkBool32 result =
        instance_data->dispatch_table.GetPhysicalDeviceXlibPresentationSupportKHR(physicalDevice, queueFamilyIndex, dpy, visualID);
    return result;
}
#endif  // VK_USE_PLATFORM_XLIB_KHR
// Record queried surface capabilities and mark the query as QUERY_DETAILS.
// NOTE: this helper expects the caller to hold global_lock, while the 2KHR/2EXT
// record helpers below take the lock themselves — the asymmetry matches each
// wrapper's locking scheme.
static void PostCallRecordGetPhysicalDeviceSurfaceCapabilitiesKHR(instance_layer_data *instance_data,
                                                                  VkPhysicalDevice physicalDevice,
                                                                  VkSurfaceCapabilitiesKHR *pSurfaceCapabilities) {
    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
    physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
    physical_device_state->surfaceCapabilities = *pSurfaceCapabilities;
}
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
                                                                       VkSurfaceCapabilitiesKHR *pSurfaceCapabilities) {
    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    auto result =
        instance_data->dispatch_table.GetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface, pSurfaceCapabilities);
    // Lock taken after dispatch; held for the record step and released on return.
    unique_lock_t lock(global_lock);
    if (result == VK_SUCCESS) {
        PostCallRecordGetPhysicalDeviceSurfaceCapabilitiesKHR(instance_data, physicalDevice, pSurfaceCapabilities);
    }
    return result;
}
// 2KHR variant: stores the embedded VkSurfaceCapabilitiesKHR; takes global_lock itself.
static void PostCallRecordGetPhysicalDeviceSurfaceCapabilities2KHR(instance_layer_data *instanceData,
                                                                   VkPhysicalDevice physicalDevice,
                                                                   VkSurfaceCapabilities2KHR *pSurfaceCapabilities) {
    unique_lock_t lock(global_lock);
    auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
    physicalDeviceState->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
    physicalDeviceState->surfaceCapabilities = pSurfaceCapabilities->surfaceCapabilities;
}
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilities2KHR(VkPhysicalDevice physicalDevice,
                                                                        const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
                                                                        VkSurfaceCapabilities2KHR *pSurfaceCapabilities) {
    auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    auto result =
        instanceData->dispatch_table.GetPhysicalDeviceSurfaceCapabilities2KHR(physicalDevice, pSurfaceInfo, pSurfaceCapabilities);
    if (result == VK_SUCCESS) {
        PostCallRecordGetPhysicalDeviceSurfaceCapabilities2KHR(instanceData, physicalDevice, pSurfaceCapabilities);
    }
    return result;
}
// 2EXT variant: VkSurfaceCapabilities2EXT is a distinct struct, so the common
// fields are copied member-by-member into the tracked VkSurfaceCapabilitiesKHR.
// The EXT-only supportedSurfaceCounters field is not tracked here.
static void PostCallRecordGetPhysicalDeviceSurfaceCapabilities2EXT(instance_layer_data *instanceData,
                                                                   VkPhysicalDevice physicalDevice,
                                                                   VkSurfaceCapabilities2EXT *pSurfaceCapabilities) {
    unique_lock_t lock(global_lock);
    auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
    physicalDeviceState->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
    physicalDeviceState->surfaceCapabilities.minImageCount = pSurfaceCapabilities->minImageCount;
    physicalDeviceState->surfaceCapabilities.maxImageCount = pSurfaceCapabilities->maxImageCount;
    physicalDeviceState->surfaceCapabilities.currentExtent = pSurfaceCapabilities->currentExtent;
    physicalDeviceState->surfaceCapabilities.minImageExtent = pSurfaceCapabilities->minImageExtent;
    physicalDeviceState->surfaceCapabilities.maxImageExtent = pSurfaceCapabilities->maxImageExtent;
    physicalDeviceState->surfaceCapabilities.maxImageArrayLayers = pSurfaceCapabilities->maxImageArrayLayers;
    physicalDeviceState->surfaceCapabilities.supportedTransforms = pSurfaceCapabilities->supportedTransforms;
    physicalDeviceState->surfaceCapabilities.currentTransform = pSurfaceCapabilities->currentTransform;
    physicalDeviceState->surfaceCapabilities.supportedCompositeAlpha = pSurfaceCapabilities->supportedCompositeAlpha;
    physicalDeviceState->surfaceCapabilities.supportedUsageFlags = pSurfaceCapabilities->supportedUsageFlags;
}
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilities2EXT(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
                                                                        VkSurfaceCapabilities2EXT *pSurfaceCapabilities) {
    auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    auto result =
        instanceData->dispatch_table.GetPhysicalDeviceSurfaceCapabilities2EXT(physicalDevice, surface, pSurfaceCapabilities);
    if (result == VK_SUCCESS) {
        PostCallRecordGetPhysicalDeviceSurfaceCapabilities2EXT(instanceData, physicalDevice, pSurfaceCapabilities);
    }
    return result;
}
// Validate queueFamilyIndex against the previously-queried family count (VU 01269).
// Caller holds global_lock.
static bool PreCallValidateGetPhysicalDeviceSurfaceSupportKHR(instance_layer_data *instance_data,
                                                              PHYSICAL_DEVICE_STATE *physical_device_state,
                                                              uint32_t queueFamilyIndex) {
    return ValidatePhysicalDeviceQueueFamily(instance_data, physical_device_state, queueFamilyIndex,
                                             "VUID-vkGetPhysicalDeviceSurfaceSupportKHR-queueFamilyIndex-01269",
                                             "vkGetPhysicalDeviceSurfaceSupportKHR", "queueFamilyIndex");
}
// Cache the (device, queue family) -> present-support answer on the surface state.
// Caller holds global_lock.
static void PostCallRecordGetPhysicalDeviceSurfaceSupportKHR(instance_layer_data *instance_data, VkPhysicalDevice physicalDevice,
                                                             uint32_t queueFamilyIndex, VkSurfaceKHR surface,
                                                             VkBool32 *pSupported) {
    auto surface_state = GetSurfaceState(instance_data, surface);
    surface_state->gpu_queue_support[{physicalDevice, queueFamilyIndex}] = (*pSupported == VK_TRUE);
}
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
                                                                  VkSurfaceKHR surface, VkBool32 *pSupported) {
    bool skip = false;
    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    unique_lock_t lock(global_lock);
    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
    skip |= PreCallValidateGetPhysicalDeviceSurfaceSupportKHR(instance_data, pd_state, queueFamilyIndex);
    lock.unlock();
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    auto result =
        instance_data->dispatch_table.GetPhysicalDeviceSurfaceSupportKHR(physicalDevice, queueFamilyIndex, surface, pSupported);
    if (result == VK_SUCCESS) {
        // Re-acquire for the record step; the lock's destructor releases it on return.
        lock.lock();
        PostCallRecordGetPhysicalDeviceSurfaceSupportKHR(instance_data, physicalDevice, queueFamilyIndex, surface, pSupported);
    }
    return result;
}
// Warn (only when pPresentModes is non-NULL) if the app never did a count-query
// first, or if the count it passes back differs from the recorded one.
// Caller holds global_lock.
static bool PreCallValidateGetPhysicalDeviceSurfacePresentModesKHR(instance_layer_data *instance_data,
                                                                   PHYSICAL_DEVICE_STATE *physical_device_state,
                                                                   CALL_STATE &call_state, VkPhysicalDevice physicalDevice,
                                                                   uint32_t *pPresentModeCount) {
    // Compare the preliminary value of *pPresentModeCount with the value this time:
    auto prev_mode_count = (uint32_t)physical_device_state->present_modes.size();
    bool skip = false;
    switch (call_state) {
        case UNCALLED:
            skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice),
                            kVUID_Core_DevLimit_MustQueryCount,
                            "vkGetPhysicalDeviceSurfacePresentModesKHR() called with non-NULL pPresentModeCount; but no prior "
                            "positive value has been seen for pPresentModeCount.");
            break;
        default:
            // both query count and query details
            if (*pPresentModeCount != prev_mode_count) {
                skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice),
                                kVUID_Core_DevLimit_CountMismatch,
                                "vkGetPhysicalDeviceSurfacePresentModesKHR() called with *pPresentModeCount (%u) that differs "
                                "from the value (%u) that was returned when pPresentModes was NULL.",
                                *pPresentModeCount, prev_mode_count);
            }
            break;
    }
    return skip;
}
// Record returned present modes; advances call_state to QUERY_COUNT / QUERY_DETAILS.
// Vector only grows, never shrinks. Caller holds global_lock.
static void PostCallRecordGetPhysicalDeviceSurfacePresentModesKHR(PHYSICAL_DEVICE_STATE *physical_device_state,
                                                                  CALL_STATE &call_state, uint32_t *pPresentModeCount,
                                                                  VkPresentModeKHR *pPresentModes) {
    if (*pPresentModeCount) {
        if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
        if (*pPresentModeCount > physical_device_state->present_modes.size())
            physical_device_state->present_modes.resize(*pPresentModeCount);
    }
    if (pPresentModes) {
        if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
        for (uint32_t i = 0; i < *pPresentModeCount; i++) {
            physical_device_state->present_modes[i] = pPresentModes[i];
        }
    }
}
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
                                                                       uint32_t *pPresentModeCount,
                                                                       VkPresentModeKHR *pPresentModes) {
    bool skip = false;
    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    unique_lock_t lock(global_lock);
    // TODO: this isn't quite right. available modes may differ by surface AND physical device.
    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
    auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState;
    if (pPresentModes) {
        skip |= PreCallValidateGetPhysicalDeviceSurfacePresentModesKHR(instance_data, physical_device_state, call_state,
                                                                       physicalDevice, pPresentModeCount);
    }
    lock.unlock();
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, surface, pPresentModeCount,
                                                                                        pPresentModes);
    // VK_INCOMPLETE still returns valid (partial) data, so record it too.
    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
        lock.lock();
        PostCallRecordGetPhysicalDeviceSurfacePresentModesKHR(physical_device_state, call_state, pPresentModeCount, pPresentModes);
    }
    return result;
}
// Warn (only when pSurfaceFormats is non-NULL) if the app never count-queried
// first, or if the count it passes differs from the recorded one.
// Caller holds global_lock.
static bool PreCallValidateGetPhysicalDeviceSurfaceFormatsKHR(instance_layer_data *instance_data,
                                                              PHYSICAL_DEVICE_STATE *physical_device_state, CALL_STATE &call_state,
                                                              VkPhysicalDevice physicalDevice, uint32_t *pSurfaceFormatCount) {
    auto prev_format_count = (uint32_t)physical_device_state->surface_formats.size();
    bool skip = false;
    switch (call_state) {
        case UNCALLED:
            // Since we haven't recorded a preliminary value of *pSurfaceFormatCount, that likely means that the application
            // didn't
            // previously call this function with a NULL value of pSurfaceFormats:
            skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice),
                            kVUID_Core_DevLimit_MustQueryCount,
                            "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormatCount; but no prior "
                            "positive value has been seen for pSurfaceFormats.");
            break;
        default:
            // NOTE(review): the condition fires on ANY mismatch (!=) but the message
            // text says "greater than" — the wording looks stale; confirm intent.
            if (prev_format_count != *pSurfaceFormatCount) {
                skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice),
                                kVUID_Core_DevLimit_CountMismatch,
                                "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormatCount, and with "
                                "pSurfaceFormats set to a value (%u) that is greater than the value (%u) that was returned "
                                "when pSurfaceFormatCount was NULL.",
                                *pSurfaceFormatCount, prev_format_count);
            }
            break;
    }
    return skip;
}
// Record returned surface formats; advances call_state. Vector only grows.
// Caller holds global_lock.
static void PostCallRecordGetPhysicalDeviceSurfaceFormatsKHR(PHYSICAL_DEVICE_STATE *physical_device_state, CALL_STATE &call_state,
                                                             uint32_t *pSurfaceFormatCount, VkSurfaceFormatKHR *pSurfaceFormats) {
    if (*pSurfaceFormatCount) {
        if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
        if (*pSurfaceFormatCount > physical_device_state->surface_formats.size())
            physical_device_state->surface_formats.resize(*pSurfaceFormatCount);
    }
    if (pSurfaceFormats) {
        if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
        for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
            physical_device_state->surface_formats[i] = pSurfaceFormats[i];
        }
    }
}
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
                                                                  uint32_t *pSurfaceFormatCount,
                                                                  VkSurfaceFormatKHR *pSurfaceFormats) {
    bool skip = false;
    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    unique_lock_t lock(global_lock);
    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
    auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState;
    if (pSurfaceFormats) {
        skip |= PreCallValidateGetPhysicalDeviceSurfaceFormatsKHR(instance_data, physical_device_state, call_state, physicalDevice,
                                                                  pSurfaceFormatCount);
    }
    lock.unlock();
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    // Call down the call chain:
    auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface, pSurfaceFormatCount,
                                                                                   pSurfaceFormats);
    // VK_INCOMPLETE still returns valid (partial) data, so record it too.
    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
        lock.lock();
        PostCallRecordGetPhysicalDeviceSurfaceFormatsKHR(physical_device_state, call_state, pSurfaceFormatCount, pSurfaceFormats);
    }
    return result;
}
// 2KHR variant of the record step: same growth/state-advance logic, but it takes
// global_lock itself and unwraps VkSurfaceFormat2KHR::surfaceFormat.
static void PostCallRecordGetPhysicalDeviceSurfaceFormats2KHR(instance_layer_data *instanceData, VkPhysicalDevice physicalDevice,
                                                              uint32_t *pSurfaceFormatCount, VkSurfaceFormat2KHR *pSurfaceFormats) {
    unique_lock_t lock(global_lock);
    auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
    if (*pSurfaceFormatCount) {
        if (physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_COUNT) {
            physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_COUNT;
        }
        if (*pSurfaceFormatCount > physicalDeviceState->surface_formats.size())
            physicalDeviceState->surface_formats.resize(*pSurfaceFormatCount);
    }
    if (pSurfaceFormats) {
        if (physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_DETAILS) {
            physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_DETAILS;
        }
        for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
            physicalDeviceState->surface_formats[i] = pSurfaceFormats[i].surfaceFormat;
        }
    }
}
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice,
                                                                   const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
                                                                   uint32_t *pSurfaceFormatCount,
                                                                   VkSurfaceFormat2KHR *pSurfaceFormats) {
    auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    auto result = instanceData->dispatch_table.GetPhysicalDeviceSurfaceFormats2KHR(physicalDevice, pSurfaceInfo,
                                                                                   pSurfaceFormatCount, pSurfaceFormats);
    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
        PostCallRecordGetPhysicalDeviceSurfaceFormats2KHR(instanceData, physicalDevice, pSurfaceFormatCount, pSurfaceFormats);
    }
    return result;
}
// VK_EXT_debug_utils commands
// Record (or clear) the debug name for an object handle in the report data's
// name map, so later messages can print friendly names.
// NOTE(review): "Obect" in the function name is a typo; kept for source compatibility.
static void PreCallRecordSetDebugUtilsObectNameEXT(layer_data *dev_data, const VkDebugUtilsObjectNameInfoEXT *pNameInfo) {
    if (pNameInfo->pObjectName) {
        lock_guard_t lock(global_lock);
        dev_data->report_data->debugUtilsObjectNameMap->insert(
            std::make_pair<uint64_t, std::string>((uint64_t &&) pNameInfo->objectHandle, pNameInfo->pObjectName));
    } else {
        // NULL name clears any previously-recorded name for the handle.
        lock_guard_t lock(global_lock);
        dev_data->report_data->debugUtilsObjectNameMap->erase(pNameInfo->objectHandle);
    }
}
VKAPI_ATTR VkResult VKAPI_CALL SetDebugUtilsObjectNameEXT(VkDevice device, const VkDebugUtilsObjectNameInfoEXT *pNameInfo) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_SUCCESS;
    PreCallRecordSetDebugUtilsObectNameEXT(dev_data, pNameInfo);
    // The down-chain entry may be absent (extension not supported below us);
    // in that case the call still succeeds from the layer's point of view.
    if (nullptr != dev_data->dispatch_table.SetDebugUtilsObjectNameEXT) {
        result = dev_data->dispatch_table.SetDebugUtilsObjectNameEXT(device, pNameInfo);
    }
    return result;
}
// Tags are pass-through only; the layer records nothing for them.
VKAPI_ATTR VkResult VKAPI_CALL SetDebugUtilsObjectTagEXT(VkDevice device, const VkDebugUtilsObjectTagInfoEXT *pTagInfo) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_SUCCESS;
    if (nullptr != dev_data->dispatch_table.SetDebugUtilsObjectTagEXT) {
        result = dev_data->dispatch_table.SetDebugUtilsObjectTagEXT(device, pTagInfo);
    }
    return result;
}
// Queue-scope debug label tracking: begin/end/insert update the label stack kept
// in report_data, then forward to the next layer if it implements the entry.
static void PreCallRecordQueueBeginDebugUtilsLabelEXT(layer_data *dev_data, VkQueue queue, const VkDebugUtilsLabelEXT *pLabelInfo) {
    BeginQueueDebugUtilsLabel(dev_data->report_data, queue, pLabelInfo);
}
VKAPI_ATTR void VKAPI_CALL QueueBeginDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT *pLabelInfo) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    PreCallRecordQueueBeginDebugUtilsLabelEXT(dev_data, queue, pLabelInfo);
    lock.unlock();
    if (nullptr != dev_data->dispatch_table.QueueBeginDebugUtilsLabelEXT) {
        dev_data->dispatch_table.QueueBeginDebugUtilsLabelEXT(queue, pLabelInfo);
    }
}
static void PostCallRecordQueueEndDebugUtilsLabelEXT(layer_data *dev_data, VkQueue queue) {
    EndQueueDebugUtilsLabel(dev_data->report_data, queue);
}
// End is recorded AFTER dispatch (the label stays active for the down-chain call).
VKAPI_ATTR void VKAPI_CALL QueueEndDebugUtilsLabelEXT(VkQueue queue) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    if (nullptr != dev_data->dispatch_table.QueueEndDebugUtilsLabelEXT) {
        dev_data->dispatch_table.QueueEndDebugUtilsLabelEXT(queue);
    }
    lock_guard_t lock(global_lock);
    PostCallRecordQueueEndDebugUtilsLabelEXT(dev_data, queue);
}
static void PreCallRecordQueueInsertDebugUtilsLabelEXT(layer_data *dev_data, VkQueue queue,
                                                       const VkDebugUtilsLabelEXT *pLabelInfo) {
    InsertQueueDebugUtilsLabel(dev_data->report_data, queue, pLabelInfo);
}
VKAPI_ATTR void VKAPI_CALL QueueInsertDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT *pLabelInfo) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    PreCallRecordQueueInsertDebugUtilsLabelEXT(dev_data, queue, pLabelInfo);
    lock.unlock();
    if (nullptr != dev_data->dispatch_table.QueueInsertDebugUtilsLabelEXT) {
        dev_data->dispatch_table.QueueInsertDebugUtilsLabelEXT(queue, pLabelInfo);
    }
}
// Command-buffer-scope debug labels: mirrors the queue-label trio above, keyed
// on the command buffer's dispatch key instead of the queue's.
static void PreCallRecordCmdBeginDebugUtilsLabelEXT(layer_data *dev_data, VkCommandBuffer commandBuffer,
                                                    const VkDebugUtilsLabelEXT *pLabelInfo) {
    BeginCmdDebugUtilsLabel(dev_data->report_data, commandBuffer, pLabelInfo);
}
VKAPI_ATTR void VKAPI_CALL CmdBeginDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT *pLabelInfo) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    PreCallRecordCmdBeginDebugUtilsLabelEXT(dev_data, commandBuffer, pLabelInfo);
    lock.unlock();
    if (nullptr != dev_data->dispatch_table.CmdBeginDebugUtilsLabelEXT) {
        dev_data->dispatch_table.CmdBeginDebugUtilsLabelEXT(commandBuffer, pLabelInfo);
    }
}
static void PostCallRecordCmdEndDebugUtilsLabelEXT(layer_data *dev_data, VkCommandBuffer commandBuffer) {
    EndCmdDebugUtilsLabel(dev_data->report_data, commandBuffer);
}
// End is recorded AFTER dispatch (label stays active for the down-chain call).
VKAPI_ATTR void VKAPI_CALL CmdEndDebugUtilsLabelEXT(VkCommandBuffer commandBuffer) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    if (nullptr != dev_data->dispatch_table.CmdEndDebugUtilsLabelEXT) {
        dev_data->dispatch_table.CmdEndDebugUtilsLabelEXT(commandBuffer);
    }
    lock_guard_t lock(global_lock);
    PostCallRecordCmdEndDebugUtilsLabelEXT(dev_data, commandBuffer);
}
static void PreCallRecordCmdInsertDebugUtilsLabelEXT(layer_data *dev_data, VkCommandBuffer commandBuffer,
                                                     const VkDebugUtilsLabelEXT *pLabelInfo) {
    InsertCmdDebugUtilsLabel(dev_data->report_data, commandBuffer, pLabelInfo);
}
VKAPI_ATTR void VKAPI_CALL CmdInsertDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT *pLabelInfo) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    PreCallRecordCmdInsertDebugUtilsLabelEXT(dev_data, commandBuffer, pLabelInfo);
    lock.unlock();
    if (nullptr != dev_data->dispatch_table.CmdInsertDebugUtilsLabelEXT) {
        dev_data->dispatch_table.CmdInsertDebugUtilsLabelEXT(commandBuffer, pLabelInfo);
    }
}
// Register the new messenger's callback with this layer's own report machinery
// so the layer can route its messages to it.
static VkResult PostCallRecordCreateDebugUtilsMessengerEXT(instance_layer_data *instance_data,
                                                           const VkDebugUtilsMessengerCreateInfoEXT *pCreateInfo,
                                                           const VkAllocationCallbacks *pAllocator,
                                                           VkDebugUtilsMessengerEXT *pMessenger) {
    return layer_create_messenger_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMessenger);
}
VKAPI_ATTR VkResult VKAPI_CALL CreateDebugUtilsMessengerEXT(VkInstance instance,
                                                            const VkDebugUtilsMessengerCreateInfoEXT *pCreateInfo,
                                                            const VkAllocationCallbacks *pAllocator,
                                                            VkDebugUtilsMessengerEXT *pMessenger) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    VkResult result = instance_data->dispatch_table.CreateDebugUtilsMessengerEXT(instance, pCreateInfo, pAllocator, pMessenger);
    // Only register locally if the down-chain create succeeded.
    if (VK_SUCCESS == result) {
        result = PostCallRecordCreateDebugUtilsMessengerEXT(instance_data, pCreateInfo, pAllocator, pMessenger);
    }
    return result;
}
static void PostCallRecordDestroyDebugUtilsMessengerEXT(instance_layer_data *instance_data, VkDebugUtilsMessengerEXT messenger,
                                                        const VkAllocationCallbacks *pAllocator) {
    layer_destroy_messenger_callback(instance_data->report_data, messenger, pAllocator);
}
VKAPI_ATTR void VKAPI_CALL DestroyDebugUtilsMessengerEXT(VkInstance instance, VkDebugUtilsMessengerEXT messenger,
                                                         const VkAllocationCallbacks *pAllocator) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    instance_data->dispatch_table.DestroyDebugUtilsMessengerEXT(instance, messenger, pAllocator);
    PostCallRecordDestroyDebugUtilsMessengerEXT(instance_data, messenger, pAllocator);
}
// Pure pass-through: the layer neither validates nor records submitted messages.
VKAPI_ATTR void VKAPI_CALL SubmitDebugUtilsMessageEXT(VkInstance instance, VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
                                                      VkDebugUtilsMessageTypeFlagsEXT messageTypes,
                                                      const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    instance_data->dispatch_table.SubmitDebugUtilsMessageEXT(instance, messageSeverity, messageTypes, pCallbackData);
}
// VK_EXT_debug_report commands
static VkResult PostCallRecordCreateDebugReportCallbackEXT(instance_layer_data *instance_data,
const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDebugReportCallbackEXT *pMsgCallback) {
return layer_create_report_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
}
VKAPI_ATTR VkResult VKAPI_CALL CreateDebugReportCallbackEXT(VkInstance instance,
const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDebugReportCallbackEXT *pMsgCallback) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
VkResult res = instance_data->dispatch_table.CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
if (VK_SUCCESS == res) {
lock_guard_t lock(global_lock);
res = PostCallRecordCreateDebugReportCallbackEXT(instance_data, pCreateInfo, pAllocator, pMsgCallback);
}
return res;
}
static void PostCallDestroyDebugReportCallbackEXT(instance_layer_data *instance_data, VkDebugReportCallbackEXT msgCallback,
const VkAllocationCallbacks *pAllocator) {
layer_destroy_report_callback(instance_data->report_data, msgCallback, pAllocator);
}
VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback,
                                                         const VkAllocationCallbacks *pAllocator) {
    // Destroy down the chain, then unregister the callback from the layer's report data under lock.
    instance_layer_data *idata = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    idata->dispatch_table.DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
    lock_guard_t lock(global_lock);
    PostCallDestroyDebugReportCallbackEXT(idata, msgCallback, pAllocator);
}
VKAPI_ATTR void VKAPI_CALL DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags,
                                                 VkDebugReportObjectTypeEXT objType, uint64_t object, size_t location,
                                                 int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    // App-injected debug-report messages require no layer bookkeeping; forward as-is.
    instance_layer_data *idata = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    idata->dispatch_table.DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
}
// Report this layer's own VkLayerProperties (exactly one entry: global_layer).
VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
}
// Report this layer's own VkLayerProperties for a device query; the answer is the same
// single global_layer entry as the instance-level query (physicalDevice is not consulted).
VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
                                                              VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
}
VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
                                                                    VkExtensionProperties *pProperties) {
    // Only queries naming this layer are answered; anything else is not ours to report.
    const bool is_this_layer = (pLayerName != nullptr) && (strcmp(pLayerName, global_layer.layerName) == 0);
    if (!is_this_layer) {
        return VK_ERROR_LAYER_NOT_PRESENT;
    }
    return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
}
VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char *pLayerName,
                                                                  uint32_t *pCount, VkExtensionProperties *pProperties) {
    // Queries naming this layer are answered locally; all others are forwarded down the chain.
    const bool is_this_layer = (pLayerName != nullptr) && (strcmp(pLayerName, global_layer.layerName) == 0);
    if (is_this_layer) {
        return util_GetExtensionProperties(1, device_extensions, pCount, pProperties);
    }
    assert(physicalDevice);
    instance_layer_data *idata = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    return idata->dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
}
// Validate the two-phase count/details query protocol for vkEnumeratePhysicalDeviceGroups():
// warn if details are requested without a prior count query, or if the app-supplied count
// disagrees with the count this instance previously reported.
// Returns true if validation wants the downstream call skipped.
static bool PreCallValidateEnumeratePhysicalDeviceGroups(VkInstance instance, uint32_t *pPhysicalDeviceGroupCount,
                                                         VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    bool skip = false;
    if (instance_data) {
        // For this instance, flag when EnumeratePhysicalDeviceGroups goes to QUERY_COUNT and then QUERY_DETAILS.
        if (NULL != pPhysicalDeviceGroupProperties) {
            if (UNCALLED == instance_data->vkEnumeratePhysicalDeviceGroupsState) {
                // Flag warning here. You can call this without having queried the count, but it may not be
                // robust on platforms with multiple physical devices.
                skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, kVUID_Core_DevLimit_MissingQueryCount,
                                "Call sequence has vkEnumeratePhysicalDeviceGroups() w/ non-NULL "
                                "pPhysicalDeviceGroupProperties. You should first call vkEnumeratePhysicalDeviceGroups() w/ "
                                "NULL pPhysicalDeviceGroupProperties to query pPhysicalDeviceGroupCount.");
            }  // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
            else if (instance_data->physical_device_groups_count != *pPhysicalDeviceGroupCount) {
                // Having actual count match count from app is not a requirement, so this can be a warning
                skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, kVUID_Core_DevLimit_CountMismatch,
                                "Call to vkEnumeratePhysicalDeviceGroups() w/ pPhysicalDeviceGroupCount value %u, but actual count "
                                "supported by this instance is %u.",
                                *pPhysicalDeviceGroupCount, instance_data->physical_device_groups_count);
            }
        }
    } else {
        // BUG FIX: the original code logged through instance_data->report_data here, dereferencing the
        // very pointer just determined to be null. With no instance data there is nowhere to log the
        // "invalid instance" error; fail validation instead of crashing.
        skip = true;
    }
    return skip;
}
// Record which phase of the count/details protocol the app has reached: a NULL properties
// pointer is a count-only query (QUERY_COUNT), a non-NULL one fetches details (QUERY_DETAILS).
static void PreCallRecordEnumeratePhysicalDeviceGroups(instance_layer_data *instance_data,
                                                       VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) {
    if (!instance_data) {
        return;
    }
    instance_data->vkEnumeratePhysicalDeviceGroupsState =
        (NULL == pPhysicalDeviceGroupProperties) ? QUERY_COUNT : QUERY_DETAILS;
}
// After a successful vkEnumeratePhysicalDeviceGroups(): on a count-only query remember the
// reported group count; on a details query create/refresh per-physical-device state for every
// member of every group.
static void PostCallRecordEnumeratePhysicalDeviceGroups(instance_layer_data *instance_data, uint32_t *pPhysicalDeviceGroupCount,
                                                        VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) {
    if (NULL == pPhysicalDeviceGroupProperties) {
        instance_data->physical_device_groups_count = *pPhysicalDeviceGroupCount;
        return;
    }
    for (uint32_t group = 0; group < *pPhysicalDeviceGroupCount; ++group) {
        const auto &group_props = pPhysicalDeviceGroupProperties[group];
        for (uint32_t member = 0; member < group_props.physicalDeviceCount; ++member) {
            VkPhysicalDevice phys_dev = group_props.physicalDevices[member];
            auto &dev_state = instance_data->physical_device_map[phys_dev];
            dev_state.phys_device = phys_dev;
            // Cache the device's actual supported features for later feature-enable checks.
            instance_data->dispatch_table.GetPhysicalDeviceFeatures(phys_dev, &dev_state.features2.features);
        }
    }
}
VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDeviceGroups(VkInstance instance, uint32_t *pPhysicalDeviceGroupCount,
                                                             VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) {
    // Validate the count/details protocol, record the phase we're entering, forward, then
    // record results on success.
    instance_layer_data *idata = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    if (PreCallValidateEnumeratePhysicalDeviceGroups(instance, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties)) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    PreCallRecordEnumeratePhysicalDeviceGroups(idata, pPhysicalDeviceGroupProperties);
    VkResult result =
        idata->dispatch_table.EnumeratePhysicalDeviceGroups(instance, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
    if (VK_SUCCESS == result) {
        PostCallRecordEnumeratePhysicalDeviceGroups(idata, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
    }
    return result;
}
VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDeviceGroupsKHR(
    VkInstance instance, uint32_t *pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) {
    // KHR alias of EnumeratePhysicalDeviceGroups: identical validate/record flow, KHR dispatch entry.
    instance_layer_data *idata = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    if (PreCallValidateEnumeratePhysicalDeviceGroups(instance, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties)) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    PreCallRecordEnumeratePhysicalDeviceGroups(idata, pPhysicalDeviceGroupProperties);
    VkResult result = idata->dispatch_table.EnumeratePhysicalDeviceGroupsKHR(instance, pPhysicalDeviceGroupCount,
                                                                             pPhysicalDeviceGroupProperties);
    if (VK_SUCCESS == result) {
        PostCallRecordEnumeratePhysicalDeviceGroups(idata, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
    }
    return result;
}
// Validate a VkDescriptorUpdateTemplateCreateInfo before creation.
// - DESCRIPTOR_SET templates must reference a known descriptor set layout (VUID 00350).
// - PUSH_DESCRIPTORS templates must use a graphics/compute bind point (00351), a known
//   pipeline layout (00352), and a `set` index that names that layout's push-descriptor
//   set layout (00353).
// Returns true if the create call should be skipped.
static bool PreCallValidateCreateDescriptorUpdateTemplate(const char *func_name, layer_data *device_data,
                                                          const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
                                                          const VkAllocationCallbacks *pAllocator,
                                                          VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
    bool skip = false;
    const auto layout = GetDescriptorSetLayout(device_data, pCreateInfo->descriptorSetLayout);
    if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET == pCreateInfo->templateType && !layout) {
        // Descriptor-set template whose referenced set layout is unknown to the layer.
        auto ds_uint = HandleToUint64(pCreateInfo->descriptorSetLayout);
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                        ds_uint, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00350",
                        "%s: Invalid pCreateInfo->descriptorSetLayout (%" PRIx64 ")", func_name, ds_uint);
    } else if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR == pCreateInfo->templateType) {
        // Push-descriptor templates: bind point must be graphics or compute.
        auto bind_point = pCreateInfo->pipelineBindPoint;
        bool valid_bp = (bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS) || (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE);
        if (!valid_bp) {
            skip |=
                log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00351",
                        "%s: Invalid pCreateInfo->pipelineBindPoint (%" PRIu32 ").", func_name, static_cast<uint32_t>(bind_point));
        }
        const auto pipeline_layout = GetPipelineLayout(device_data, pCreateInfo->pipelineLayout);
        if (!pipeline_layout) {
            // Referenced pipeline layout is unknown to the layer.
            uint64_t pl_uint = HandleToUint64(pCreateInfo->pipelineLayout);
            skip |=
                log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT,
                        pl_uint, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00352",
                        "%s: Invalid pCreateInfo->pipelineLayout (%" PRIx64 ")", func_name, pl_uint);
        } else {
            // pCreateInfo->set must be in range and name the pipeline layout's push-descriptor set.
            const uint32_t pd_set = pCreateInfo->set;
            if ((pd_set >= pipeline_layout->set_layouts.size()) || !pipeline_layout->set_layouts[pd_set] ||
                !pipeline_layout->set_layouts[pd_set]->IsPushDescriptor()) {
                uint64_t pl_uint = HandleToUint64(pCreateInfo->pipelineLayout);
                skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, pl_uint,
                                "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00353",
                                "%s: pCreateInfo->set (%" PRIu32
                                ") does not refer to the push descriptor set layout for "
                                "pCreateInfo->pipelineLayout (%" PRIx64 ").",
                                func_name, pd_set, pl_uint);
            }
        }
    }
    return skip;
}
// Record a newly created descriptor update template. A deep copy of the create info is kept
// because later vkUpdateDescriptorSetWithTemplate calls need it to interpret the raw pData blob.
static void PostCallRecordCreateDescriptorUpdateTemplate(layer_data *device_data,
                                                         const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
                                                         VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
    auto *create_info_copy = new safe_VkDescriptorUpdateTemplateCreateInfo(pCreateInfo);
    device_data->desc_template_map[*pDescriptorUpdateTemplate] =
        std::unique_ptr<TEMPLATE_STATE>(new TEMPLATE_STATE(*pDescriptorUpdateTemplate, create_info_copy));
}
VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorUpdateTemplate(VkDevice device,
                                                              const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
                                                              const VkAllocationCallbacks *pAllocator,
                                                              VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // Validate under lock, dispatch unlocked, then re-lock to record the new template state.
    unique_lock_t lock(global_lock);
    const bool skip = PreCallValidateCreateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplate()", dev_data, pCreateInfo,
                                                                    pAllocator, pDescriptorUpdateTemplate);
    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    lock.unlock();
    VkResult result =
        dev_data->dispatch_table.CreateDescriptorUpdateTemplate(device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate);
    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordCreateDescriptorUpdateTemplate(dev_data, pCreateInfo, pDescriptorUpdateTemplate);
    }
    return result;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorUpdateTemplateKHR(VkDevice device,
                                                                 const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
                                                                 const VkAllocationCallbacks *pAllocator,
                                                                 VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // KHR alias: same validate/dispatch/record flow as CreateDescriptorUpdateTemplate.
    unique_lock_t lock(global_lock);
    const bool skip = PreCallValidateCreateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplateKHR()", dev_data, pCreateInfo,
                                                                    pAllocator, pDescriptorUpdateTemplate);
    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    lock.unlock();
    VkResult result =
        dev_data->dispatch_table.CreateDescriptorUpdateTemplateKHR(device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate);
    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordCreateDescriptorUpdateTemplate(dev_data, pCreateInfo, pDescriptorUpdateTemplate);
    }
    return result;
}
// Drop the layer's state for a descriptor update template about to be destroyed.
// Caller must hold global_lock.
static void PreCallRecordDestroyDescriptorUpdateTemplate(layer_data *device_data,
                                                         VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate) {
    device_data->desc_template_map.erase(descriptorUpdateTemplate);
}
VKAPI_ATTR void VKAPI_CALL DestroyDescriptorUpdateTemplate(VkDevice device, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                                           const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    {
        // Pre-record to avoid Destroy/Create race
        unique_lock_t lock(global_lock);
        PreCallRecordDestroyDescriptorUpdateTemplate(dev_data, descriptorUpdateTemplate);
    }
    dev_data->dispatch_table.DestroyDescriptorUpdateTemplate(device, descriptorUpdateTemplate, pAllocator);
}
VKAPI_ATTR void VKAPI_CALL DestroyDescriptorUpdateTemplateKHR(VkDevice device,
                                                              VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                                              const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    {
        // Pre-record to avoid Destroy/Create race
        unique_lock_t lock(global_lock);
        PreCallRecordDestroyDescriptorUpdateTemplate(dev_data, descriptorUpdateTemplate);
    }
    dev_data->dispatch_table.DestroyDescriptorUpdateTemplateKHR(device, descriptorUpdateTemplate, pAllocator);
}
// PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSetsWithTemplate()
// Applies the recorded template (created by PostCallRecordCreateDescriptorUpdateTemplate) to the
// layer's shadow state for descriptorSet. Caller must hold global_lock.
static void PostCallRecordUpdateDescriptorSetWithTemplate(layer_data *device_data, VkDescriptorSet descriptorSet,
                                                          VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                                          const void *pData) {
    auto const template_map_entry = device_data->desc_template_map.find(descriptorUpdateTemplate);
    if (template_map_entry == device_data->desc_template_map.end()) {
        // BUG FIX: the original asserted here but then dereferenced the end() iterator anyway,
        // which is undefined behavior in release (NDEBUG) builds. Bail out instead.
        assert(0);
        return;
    }
    cvdescriptorset::PerformUpdateDescriptorSetsWithTemplateKHR(device_data, descriptorSet, template_map_entry->second, pData);
}
VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet,
                                                           VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                                           const void *pData) {
    // Dispatch first, then mirror the update into the layer's shadow descriptor state under lock.
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.UpdateDescriptorSetWithTemplate(device, descriptorSet, descriptorUpdateTemplate, pData);
    unique_lock_t lock(global_lock);
    PostCallRecordUpdateDescriptorSetWithTemplate(dev_data, descriptorSet, descriptorUpdateTemplate, pData);
}
VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
                                                              VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                                              const void *pData) {
    // KHR alias: dispatch, then mirror the update into shadow descriptor state under lock.
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.UpdateDescriptorSetWithTemplateKHR(device, descriptorSet, descriptorUpdateTemplate, pData);
    unique_lock_t lock(global_lock);
    PostCallRecordUpdateDescriptorSetWithTemplate(dev_data, descriptorSet, descriptorUpdateTemplate, pData);
}
// Minimal command-buffer state validation for vkCmdPushDescriptorSetWithTemplateKHR.
// Returns true if the command should be skipped.
static bool PreCallValidateCmdPushDescriptorSetWithTemplateKHR(layer_data *dev_data, GLOBAL_CB_NODE *cb_state) {
    return ValidateCmd(dev_data, cb_state, CMD_PUSHDESCRIPTORSETWITHTEMPLATEKHR, "vkCmdPushDescriptorSetWithTemplateKHR()");
}
VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
                                                               VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                                               VkPipelineLayout layout, uint32_t set, const void *pData) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = false;
    {
        unique_lock_t lock(global_lock);
        GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
        // Unknown command buffers get no validation but are still forwarded.
        if (cb_state) {
            skip = PreCallValidateCmdPushDescriptorSetWithTemplateKHR(dev_data, cb_state);
        }
    }
    if (!skip) {
        dev_data->dispatch_table.CmdPushDescriptorSetWithTemplateKHR(commandBuffer, descriptorUpdateTemplate, layout, set, pData);
    }
}
// Advance the per-physical-device query-state machine for display-plane property queries
// (UNCALLED -> QUERY_COUNT -> QUERY_DETAILS, never backwards) and remember the plane count.
// Shared by the KHR and KHR2 entry points.
static void PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(instance_layer_data *instanceData,
                                                                     VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
                                                                     void *pProperties) {
    unique_lock_t lock(global_lock);
    auto phys_dev_state = GetPhysicalDeviceState(instanceData, physicalDevice);
    if (*pPropertyCount) {
        if (phys_dev_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState < QUERY_COUNT) {
            phys_dev_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState = QUERY_COUNT;
        }
        phys_dev_state->display_plane_property_count = *pPropertyCount;
    }
    if (pProperties && phys_dev_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState < QUERY_DETAILS) {
        phys_dev_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState = QUERY_DETAILS;
    }
}
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayPlanePropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
                                                                          VkDisplayPlanePropertiesKHR *pProperties) {
    // Forward, then record the query phase/count (VK_INCOMPLETE still returns valid data).
    instance_layer_data *idata = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    VkResult result =
        idata->dispatch_table.GetPhysicalDeviceDisplayPlanePropertiesKHR(physicalDevice, pPropertyCount, pProperties);
    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
        PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(idata, physicalDevice, pPropertyCount, pProperties);
    }
    return result;
}
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayPlaneProperties2KHR(VkPhysicalDevice physicalDevice,
                                                                           uint32_t *pPropertyCount,
                                                                           VkDisplayPlaneProperties2KHR *pProperties) {
    // "2" variant: same record path as the KHR entry point.
    instance_layer_data *idata = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    VkResult result =
        idata->dispatch_table.GetPhysicalDeviceDisplayPlaneProperties2KHR(physicalDevice, pPropertyCount, pProperties);
    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
        PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(idata, physicalDevice, pPropertyCount, pProperties);
    }
    return result;
}
// Common check for APIs that take a display planeIndex: warn if the plane count was never
// queried for this physical device, error if planeIndex is out of the queried range.
// Caller must hold global_lock. Returns true if the calling API should be skipped.
static bool ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_layer_data *instance_data,
                                                                    VkPhysicalDevice physicalDevice, uint32_t planeIndex,
                                                                    const char *api_name) {
    bool skip = false;
    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
    if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState == UNCALLED) {
        skip |=
            log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                    HandleToUint64(physicalDevice), kVUID_Core_Swapchain_GetSupportedDisplaysWithoutQuery,
                    "Potential problem with calling %s() without first querying vkGetPhysicalDeviceDisplayPlanePropertiesKHR "
                    "or vkGetPhysicalDeviceDisplayPlaneProperties2KHR.",
                    api_name);
    } else {
        // NOTE(review): display_plane_property_count - 1 is printed with %d; when the count is 0
        // this prints -1 ("range [0, -1]") — verify that is the intended message for zero planes.
        if (planeIndex >= physical_device_state->display_plane_property_count) {
            skip |= log_msg(
                instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                HandleToUint64(physicalDevice), "VUID-vkGetDisplayPlaneSupportedDisplaysKHR-planeIndex-01249",
                "%s(): planeIndex must be in the range [0, %d] that was returned by vkGetPhysicalDeviceDisplayPlanePropertiesKHR "
                "or vkGetPhysicalDeviceDisplayPlaneProperties2KHR. Do you have the plane index hardcoded?",
                api_name, physical_device_state->display_plane_property_count - 1);
        }
    }
    return skip;
}
// Pre-call validation for vkGetDisplayPlaneSupportedDisplaysKHR: check the planeIndex against
// the previously queried plane count, under the global lock.
static bool PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(instance_layer_data *instance_data, VkPhysicalDevice physicalDevice,
                                                               uint32_t planeIndex) {
    lock_guard_t lock(global_lock);
    return ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_data, physicalDevice, planeIndex,
                                                                   "vkGetDisplayPlaneSupportedDisplaysKHR");
}
VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
                                                                   uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) {
    instance_layer_data *idata = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    if (PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(idata, physicalDevice, planeIndex)) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    return idata->dispatch_table.GetDisplayPlaneSupportedDisplaysKHR(physicalDevice, planeIndex, pDisplayCount, pDisplays);
}
// Pre-call validation for vkGetDisplayPlaneCapabilitiesKHR (and its "2" variant): check the
// planeIndex against the previously queried plane count, under the global lock.
static bool PreCallValidateGetDisplayPlaneCapabilitiesKHR(instance_layer_data *instance_data, VkPhysicalDevice physicalDevice,
                                                          uint32_t planeIndex) {
    lock_guard_t lock(global_lock);
    return ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_data, physicalDevice, planeIndex,
                                                                   "vkGetDisplayPlaneCapabilitiesKHR");
}
VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode,
                                                              uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR *pCapabilities) {
    instance_layer_data *idata = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    if (PreCallValidateGetDisplayPlaneCapabilitiesKHR(idata, physicalDevice, planeIndex)) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    return idata->dispatch_table.GetDisplayPlaneCapabilitiesKHR(physicalDevice, mode, planeIndex, pCapabilities);
}
VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneCapabilities2KHR(VkPhysicalDevice physicalDevice,
                                                               const VkDisplayPlaneInfo2KHR *pDisplayPlaneInfo,
                                                               VkDisplayPlaneCapabilities2KHR *pCapabilities) {
    // "2" variant: the plane index comes from the info struct; validation is shared with KHR.
    instance_layer_data *idata = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    if (PreCallValidateGetDisplayPlaneCapabilitiesKHR(idata, physicalDevice, pDisplayPlaneInfo->planeIndex)) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    return idata->dispatch_table.GetDisplayPlaneCapabilities2KHR(physicalDevice, pDisplayPlaneInfo, pCapabilities);
}
// Track the debug name an app attaches to an object handle: a non-null name inserts a mapping
// (insert semantics: an existing entry for the handle is left untouched); a null name removes it.
static void PreCallRecordDebugMarkerSetObjectNameEXT(layer_data *dev_data, const VkDebugMarkerObjectNameInfoEXT *pNameInfo) {
    auto *name_map = dev_data->report_data->debugObjectNameMap;
    if (pNameInfo->pObjectName) {
        name_map->insert(std::make_pair(static_cast<uint64_t>(pNameInfo->object), std::string(pNameInfo->pObjectName)));
    } else {
        name_map->erase(pNameInfo->object);
    }
}
VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectNameEXT(VkDevice device, const VkDebugMarkerObjectNameInfoEXT *pNameInfo) {
    // Record the name under lock before forwarding, so subsequent validation messages can use it.
    unique_lock_t lock(global_lock);
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    PreCallRecordDebugMarkerSetObjectNameEXT(dev_data, pNameInfo);
    lock.unlock();
    return dev_data->dispatch_table.DebugMarkerSetObjectNameEXT(device, pNameInfo);
}
VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectTagEXT(VkDevice device, VkDebugMarkerObjectTagInfoEXT *pTagInfo) {
    // No layer state is tracked for object tags; forward straight down the chain.
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    return dev_data->dispatch_table.DebugMarkerSetObjectTagEXT(device, pTagInfo);
}
// Minimal command-buffer state validation for vkCmdDebugMarkerBeginEXT.
// Returns true if the command should be skipped.
static bool PreCallValidateCmdDebugMarkerBeginEXT(layer_data *dev_data, GLOBAL_CB_NODE *cb_state) {
    return ValidateCmd(dev_data, cb_state, CMD_DEBUGMARKERBEGINEXT, "vkCmdDebugMarkerBeginEXT()");
}
VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerBeginEXT(VkCommandBuffer commandBuffer, VkDebugMarkerMarkerInfoEXT *pMarkerInfo) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = false;
    {
        unique_lock_t lock(global_lock);
        GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
        // Unknown command buffers get no validation but are still forwarded.
        if (cb_state) {
            skip = PreCallValidateCmdDebugMarkerBeginEXT(dev_data, cb_state);
        }
    }
    if (!skip) {
        dev_data->dispatch_table.CmdDebugMarkerBeginEXT(commandBuffer, pMarkerInfo);
    }
}
// Minimal command-buffer state validation for vkCmdDebugMarkerEndEXT.
// Returns true if the command should be skipped.
static bool PreCallValidateCmdDebugMarkerEndEXT(layer_data *dev_data, GLOBAL_CB_NODE *cb_state) {
    return ValidateCmd(dev_data, cb_state, CMD_DEBUGMARKERENDEXT, "vkCmdDebugMarkerEndEXT()");
}
VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerEndEXT(VkCommandBuffer commandBuffer) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = false;
    {
        unique_lock_t lock(global_lock);
        GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
        // Unknown command buffers get no validation but are still forwarded.
        if (cb_state) {
            skip = PreCallValidateCmdDebugMarkerEndEXT(dev_data, cb_state);
        }
    }
    if (!skip) {
        dev_data->dispatch_table.CmdDebugMarkerEndEXT(commandBuffer);
    }
}
// Pass-through: no validation or state tracking is performed for inserted debug markers.
VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerInsertEXT(VkCommandBuffer commandBuffer, VkDebugMarkerMarkerInfoEXT *pMarkerInfo) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    device_data->dispatch_table.CmdDebugMarkerInsertEXT(commandBuffer, pMarkerInfo);
}
// Minimal command-buffer state validation for vkCmdSetDiscardRectangleEXT.
// Returns true if the command should be skipped.
static bool PreCallValidateCmdSetDiscardRectangleEXT(layer_data *dev_data, GLOBAL_CB_NODE *cb_state) {
    return ValidateCmd(dev_data, cb_state, CMD_SETDISCARDRECTANGLEEXT, "vkCmdSetDiscardRectangleEXT()");
}
VKAPI_ATTR void VKAPI_CALL CmdSetDiscardRectangleEXT(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle,
                                                     uint32_t discardRectangleCount, const VkRect2D *pDiscardRectangles) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = false;
    {
        unique_lock_t lock(global_lock);
        GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
        // Unknown command buffers get no validation but are still forwarded.
        if (cb_state) {
            skip = PreCallValidateCmdSetDiscardRectangleEXT(dev_data, cb_state);
        }
    }
    if (!skip) {
        dev_data->dispatch_table.CmdSetDiscardRectangleEXT(commandBuffer, firstDiscardRectangle, discardRectangleCount,
                                                           pDiscardRectangles);
    }
}
// Minimal command-buffer state validation for vkCmdSetSampleLocationsEXT.
// Returns true if the command should be skipped.
static bool PreCallValidateCmdSetSampleLocationsEXT(layer_data *dev_data, GLOBAL_CB_NODE *cb_state) {
    return ValidateCmd(dev_data, cb_state, CMD_SETSAMPLELOCATIONSEXT, "vkCmdSetSampleLocationsEXT()");
}
VKAPI_ATTR void VKAPI_CALL CmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer,
                                                    const VkSampleLocationsInfoEXT *pSampleLocationsInfo) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = false;
    {
        unique_lock_t lock(global_lock);
        GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
        // Unknown command buffers get no validation but are still forwarded.
        if (cb_state) {
            skip = PreCallValidateCmdSetSampleLocationsEXT(dev_data, cb_state);
        }
    }
    if (!skip) {
        dev_data->dispatch_table.CmdSetSampleLocationsEXT(commandBuffer, pSampleLocationsInfo);
    }
}
// Validate vkCmdDrawIndirectCountKHR: alignment of offset/countBufferOffset (multiple of 4),
// stride alignment and minimum size, general draw-time state, and memory binding of both the
// indirect-parameters buffer and the count buffer.
// Out-params: *cb_state, *buffer_state, *count_buffer_state are looked up for the caller's
// subsequent record step. Returns true if the draw should be skipped.
static bool PreCallValidateCmdDrawIndirectCountKHR(layer_data *dev_data, VkCommandBuffer commandBuffer, VkBuffer buffer,
                                                   VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset,
                                                   uint32_t stride, GLOBAL_CB_NODE **cb_state, BUFFER_STATE **buffer_state,
                                                   BUFFER_STATE **count_buffer_state, bool indexed, VkPipelineBindPoint bind_point,
                                                   const char *caller) {
    bool skip = false;
    // VUID 03108: offset must be 4-byte aligned.
    if (offset & 3) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdDrawIndirectCountKHR-offset-03108",
                        "vkCmdDrawIndirectCountKHR() parameter, VkDeviceSize offset (0x%" PRIxLEAST64 "), is not a multiple of 4.",
                        offset);
    }
    // VUID 03109: countBufferOffset must be 4-byte aligned.
    if (countBufferOffset & 3) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdDrawIndirectCountKHR-countBufferOffset-03109",
                        "vkCmdDrawIndirectCountKHR() parameter, VkDeviceSize countBufferOffset (0x%" PRIxLEAST64
                        "), is not a multiple of 4.",
                        countBufferOffset);
    }
    // VUID 03110: stride must be 4-byte aligned and at least sizeof(VkDrawIndirectCommand).
    if ((stride & 3) || stride < sizeof(VkDrawIndirectCommand)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdDrawIndirectCountKHR-stride-03110",
                        "vkCmdDrawIndirectCountKHR() parameter, uint32_t stride (0x%" PRIxLEAST32
                        "), is not a multiple of 4 or smaller than sizeof (VkDrawIndirectCommand).",
                        stride);
    }
    // Shared draw-time checks (queue family, render pass, pipeline/descriptor state); also
    // fills in *cb_state.
    skip |= ValidateCmdDrawType(dev_data, commandBuffer, indexed, bind_point, CMD_DRAWINDIRECTCOUNTKHR, cb_state, caller,
                                VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdDrawIndirectCountKHR-commandBuffer-cmdpool",
                                "VUID-vkCmdDrawIndirectCountKHR-renderpass", "VUID-vkCmdDrawIndirectCountKHR-None-03119",
                                "VUID-vkCmdDrawIndirectCountKHR-None-03120");
    // Both buffers must have memory bound (VUIDs 03104 / 03106).
    *buffer_state = GetBufferState(dev_data, buffer);
    *count_buffer_state = GetBufferState(dev_data, countBuffer);
    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, "VUID-vkCmdDrawIndirectCountKHR-buffer-03104");
    skip |=
        ValidateMemoryIsBoundToBuffer(dev_data, *count_buffer_state, caller, "VUID-vkCmdDrawIndirectCountKHR-countBuffer-03106");
    return skip;
}
// Record state for vkCmdDrawIndirectCountKHR: update draw-time bound state, then tie both
// indirect buffers to this command buffer's lifetime.
static void PreCallRecordCmdDrawIndirectCountKHR(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
                                                 BUFFER_STATE *buffer_state, BUFFER_STATE *count_buffer_state) {
    UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
    for (BUFFER_STATE *buf : {buffer_state, count_buffer_state}) {
        AddCommandBufferBindingBuffer(dev_data, cb_state, buf);
    }
}
VKAPI_ATTR void VKAPI_CALL CmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                   VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
                                                   uint32_t stride) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    BUFFER_STATE *buffer_state = nullptr;
    BUFFER_STATE *count_buffer_state = nullptr;
    unique_lock_t lock(global_lock);
    // Validation also looks up the command-buffer and buffer state objects used for recording.
    const bool skip = PreCallValidateCmdDrawIndirectCountKHR(dev_data, commandBuffer, buffer, offset, countBuffer,
                                                             countBufferOffset, stride, &cb_state, &buffer_state,
                                                             &count_buffer_state, false, VK_PIPELINE_BIND_POINT_GRAPHICS,
                                                             "vkCmdDrawIndirectCountKHR()");
    if (skip) return;
    PreCallRecordCmdDrawIndirectCountKHR(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state, count_buffer_state);
    lock.unlock();
    dev_data->dispatch_table.CmdDrawIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount,
                                                     stride);
}
// Validate vkCmdDrawIndexedIndirectCountKHR: alignment of offset/countBufferOffset (multiple
// of 4), stride alignment and minimum size (VkDrawIndexedIndirectCommand), general indexed
// draw-time state, and memory binding of both the indirect-parameters buffer and count buffer.
// Out-params: *cb_state, *buffer_state, *count_buffer_state are looked up for the caller's
// subsequent record step. Returns true if the draw should be skipped.
static bool PreCallValidateCmdDrawIndexedIndirectCountKHR(layer_data *dev_data, VkCommandBuffer commandBuffer, VkBuffer buffer,
                                                          VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset,
                                                          uint32_t stride, GLOBAL_CB_NODE **cb_state, BUFFER_STATE **buffer_state,
                                                          BUFFER_STATE **count_buffer_state, bool indexed,
                                                          VkPipelineBindPoint bind_point, const char *caller) {
    bool skip = false;
    // VUID 03140: offset must be 4-byte aligned.
    if (offset & 3) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdDrawIndexedIndirectCountKHR-offset-03140",
                        "vkCmdDrawIndexedIndirectCountKHR() parameter, VkDeviceSize offset (0x%" PRIxLEAST64
                        "), is not a multiple of 4.",
                        offset);
    }
    // VUID 03141: countBufferOffset must be 4-byte aligned.
    if (countBufferOffset & 3) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdDrawIndexedIndirectCountKHR-countBufferOffset-03141",
                        "vkCmdDrawIndexedIndirectCountKHR() parameter, VkDeviceSize countBufferOffset (0x%" PRIxLEAST64
                        "), is not a multiple of 4.",
                        countBufferOffset);
    }
    // VUID 03142: stride must be 4-byte aligned and at least sizeof(VkDrawIndexedIndirectCommand).
    if ((stride & 3) || stride < sizeof(VkDrawIndexedIndirectCommand)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdDrawIndexedIndirectCountKHR-stride-03142",
                        "vkCmdDrawIndexedIndirectCountKHR() parameter, uint32_t stride (0x%" PRIxLEAST32
                        "), is not a multiple of 4 or smaller than sizeof (VkDrawIndexedIndirectCommand).",
                        stride);
    }
    // Shared draw-time checks (queue family, render pass, pipeline/descriptor state); also
    // fills in *cb_state.
    skip |= ValidateCmdDrawType(
        dev_data, commandBuffer, indexed, bind_point, CMD_DRAWINDEXEDINDIRECTCOUNTKHR, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
        "VUID-vkCmdDrawIndexedIndirectCountKHR-commandBuffer-cmdpool", "VUID-vkCmdDrawIndexedIndirectCountKHR-renderpass",
        "VUID-vkCmdDrawIndexedIndirectCountKHR-None-03151", "VUID-vkCmdDrawIndexedIndirectCountKHR-None-03152");
    // Both buffers must have memory bound (VUIDs 03136 / 03138).
    *buffer_state = GetBufferState(dev_data, buffer);
    *count_buffer_state = GetBufferState(dev_data, countBuffer);
    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, "VUID-vkCmdDrawIndexedIndirectCountKHR-buffer-03136");
    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *count_buffer_state, caller,
                                          "VUID-vkCmdDrawIndexedIndirectCountKHR-countBuffer-03138");
    return skip;
}
// Record state updates for vkCmdDrawIndexedIndirectCountKHR: refresh draw-time
// state for the bind point and attach both the indirect-parameter buffer and
// the count buffer to the command buffer so their lifetimes are tracked.
static void PreCallRecordCmdDrawIndexedIndirectCountKHR(layer_data *dev_data, GLOBAL_CB_NODE *cb_state,
                                                        VkPipelineBindPoint bind_point, BUFFER_STATE *buffer_state,
                                                        BUFFER_STATE *count_buffer_state) {
    UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
    // Guard against null state pointers (GetBufferState() returns nullptr for
    // unknown handles), matching the sibling PreCallRecordCmdDrawMeshTasksIndirect*
    // helpers which already perform this check.
    if (buffer_state) {
        AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
    }
    if (count_buffer_state) {
        AddCommandBufferBindingBuffer(dev_data, cb_state, count_buffer_state);
    }
}
// Intercept for vkCmdDrawIndexedIndirectCountKHR. Validates draw state and
// buffer bindings under the global lock; on success records state updates,
// drops the lock, and forwards the call down the dispatch chain.
VKAPI_ATTR void VKAPI_CALL CmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                          VkBuffer countBuffer, VkDeviceSize countBufferOffset,
                                                          uint32_t maxDrawCount, uint32_t stride) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    BUFFER_STATE *buffer_state = nullptr;
    BUFFER_STATE *count_buffer_state = nullptr;
    unique_lock_t lock(global_lock);
    const bool skip = PreCallValidateCmdDrawIndexedIndirectCountKHR(
        dev_data, commandBuffer, buffer, offset, countBuffer, countBufferOffset, stride, &cb_state, &buffer_state,
        &count_buffer_state, true, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexedIndirectCountKHR()");
    if (skip) return;  // lock released by destructor
    PreCallRecordCmdDrawIndexedIndirectCountKHR(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state,
                                                count_buffer_state);
    lock.unlock();
    dev_data->dispatch_table.CmdDrawIndexedIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
                                                            maxDrawCount, stride);
}
// Validation for vkCmdDrawMeshTasksNV: defers entirely to the shared
// draw-type validation, supplying the NV mesh-task VUIDs.
static bool PreCallValidateCmdDrawMeshTasksNV(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
                                              GLOBAL_CB_NODE **cb_state, const char *caller) {
    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWMESHTASKSNV, cb_state, caller,
                               VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdDrawMeshTasksNV-commandBuffer-cmdpool",
                               "VUID-vkCmdDrawMeshTasksNV-renderpass", "VUID-vkCmdDrawMeshTasksNV-None-02125",
                               "VUID-vkCmdDrawMeshTasksNV-None-02126");
}
// State recording for vkCmdDrawMeshTasksNV: only the generic draw-time state
// needs refreshing; this command references no buffers.
static void PreCallRecordCmdDrawMeshTasksNV(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
    UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
}
// Intercept for vkCmdDrawMeshTasksNV: validate, record, then dispatch.
VKAPI_ATTR void VKAPI_CALL CmdDrawMeshTasksNV(VkCommandBuffer commandBuffer, uint32_t taskCount, uint32_t firstTask) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    unique_lock_t lock(global_lock);
    const bool skip = PreCallValidateCmdDrawMeshTasksNV(dev_data, commandBuffer, /* indexed */ false,
                                                        VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state, "vkCmdDrawMeshTasksNV()");
    if (skip) return;  // lock released by destructor
    PreCallRecordCmdDrawMeshTasksNV(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
    lock.unlock();
    dev_data->dispatch_table.CmdDrawMeshTasksNV(commandBuffer, taskCount, firstTask);
}
// Validation for vkCmdDrawMeshTasksIndirectNV: shared draw-type validation
// plus a check that the indirect-parameter buffer has memory bound.
static bool PreCallValidateCmdDrawMeshTasksIndirectNV(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer,
                                                      bool indexed, VkPipelineBindPoint bind_point,
                                                      GLOBAL_CB_NODE **cb_state, BUFFER_STATE **buffer_state,
                                                      const char *caller) {
    bool result = ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWMESHTASKSINDIRECTNV, cb_state,
                                      caller, VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdDrawMeshTasksIndirectNV-commandBuffer-cmdpool",
                                      "VUID-vkCmdDrawMeshTasksIndirectNV-renderpass",
                                      "VUID-vkCmdDrawMeshTasksIndirectNV-None-02154",
                                      "VUID-vkCmdDrawMeshTasksIndirectNV-None-02155");
    *buffer_state = GetBufferState(dev_data, buffer);
    result |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, "VUID-vkCmdDrawMeshTasksIndirectNV-buffer-02143");
    return result;
}
// State recording for vkCmdDrawMeshTasksIndirectNV: refresh draw-time state
// and, when present, bind the indirect-parameter buffer to the command buffer.
static void PreCallRecordCmdDrawMeshTasksIndirectNV(layer_data *dev_data, GLOBAL_CB_NODE *cb_state,
                                                    VkPipelineBindPoint bind_point, BUFFER_STATE *buffer_state) {
    UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
    if (buffer_state != nullptr) {
        AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
    }
}
// Intercept for vkCmdDrawMeshTasksIndirectNV: validate under the global lock,
// record state on success, then forward to the driver.
VKAPI_ATTR void VKAPI_CALL CmdDrawMeshTasksIndirectNV(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                      uint32_t drawCount, uint32_t stride) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    BUFFER_STATE *buffer_state = nullptr;
    unique_lock_t lock(global_lock);
    const bool skip = PreCallValidateCmdDrawMeshTasksIndirectNV(dev_data, commandBuffer, buffer, /* indexed */ false,
                                                                VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state, &buffer_state,
                                                                "vkCmdDrawMeshTasksIndirectNV()");
    if (skip) return;  // lock released by destructor
    PreCallRecordCmdDrawMeshTasksIndirectNV(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
    lock.unlock();
    dev_data->dispatch_table.CmdDrawMeshTasksIndirectNV(commandBuffer, buffer, offset, drawCount, stride);
}
// Validation for vkCmdDrawMeshTasksIndirectCountNV: shared draw-type
// validation plus memory-binding checks for both the indirect-parameter
// buffer and the count buffer.
static bool PreCallValidateCmdDrawMeshTasksIndirectCountNV(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer,
                                                           VkBuffer count_buffer, bool indexed, VkPipelineBindPoint bind_point,
                                                           GLOBAL_CB_NODE **cb_state, BUFFER_STATE **buffer_state,
                                                           BUFFER_STATE **count_buffer_state, const char *caller) {
    bool result = ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWMESHTASKSINDIRECTCOUNTNV, cb_state,
                                      caller, VK_QUEUE_GRAPHICS_BIT,
                                      "VUID-vkCmdDrawMeshTasksIndirectCountNV-commandBuffer-cmdpool",
                                      "VUID-vkCmdDrawMeshTasksIndirectCountNV-renderpass",
                                      "VUID-vkCmdDrawMeshTasksIndirectCountNV-None-02189",
                                      "VUID-vkCmdDrawMeshTasksIndirectCountNV-None-02190");
    *buffer_state = GetBufferState(dev_data, buffer);
    *count_buffer_state = GetBufferState(dev_data, count_buffer);
    result |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller,
                                            "VUID-vkCmdDrawMeshTasksIndirectCountNV-buffer-02176");
    result |= ValidateMemoryIsBoundToBuffer(dev_data, *count_buffer_state, caller,
                                            "VUID-vkCmdDrawMeshTasksIndirectCountNV-countBuffer-02178");
    return result;
}
// State recording for vkCmdDrawMeshTasksIndirectCountNV: refresh draw-time
// state and bind whichever of the two buffers were resolved to known state.
static void PreCallRecordCmdDrawMeshTasksIndirectCountNV(layer_data *dev_data, GLOBAL_CB_NODE *cb_state,
                                                         VkPipelineBindPoint bind_point, BUFFER_STATE *buffer_state,
                                                         BUFFER_STATE *count_buffer_state) {
    UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
    if (buffer_state != nullptr) {
        AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
    }
    if (count_buffer_state != nullptr) {
        AddCommandBufferBindingBuffer(dev_data, cb_state, count_buffer_state);
    }
}
// Intercept for vkCmdDrawMeshTasksIndirectCountNV: validate under the global
// lock, record state on success, then forward to the driver.
VKAPI_ATTR void VKAPI_CALL CmdDrawMeshTasksIndirectCountNV(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                           VkBuffer countBuffer, VkDeviceSize countBufferOffset,
                                                           uint32_t maxDrawCount, uint32_t stride) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    BUFFER_STATE *buffer_state = nullptr;
    BUFFER_STATE *count_buffer_state = nullptr;
    unique_lock_t lock(global_lock);
    const bool skip = PreCallValidateCmdDrawMeshTasksIndirectCountNV(dev_data, commandBuffer, buffer, countBuffer,
                                                                     /* indexed */ false, VK_PIPELINE_BIND_POINT_GRAPHICS,
                                                                     &cb_state, &buffer_state, &count_buffer_state,
                                                                     "vkCmdDrawMeshTasksIndirectCountNV()");
    if (skip) return;  // lock released by destructor
    PreCallRecordCmdDrawMeshTasksIndirectCountNV(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state,
                                                 count_buffer_state);
    lock.unlock();
    dev_data->dispatch_table.CmdDrawMeshTasksIndirectCountNV(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
                                                             maxDrawCount, stride);
}
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName);
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName);
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName);
// Map of all APIs to be intercepted by this layer.
// Keys are Vulkan entry-point names; values are the layer's interception
// functions, looked up by GetInstanceProcAddr/GetDeviceProcAddr below.
// Platform-specific WSI entries are compiled in only when the corresponding
// VK_USE_PLATFORM_* macro is defined.
static const std::unordered_map<std::string, void *> name_to_funcptr_map = {
    {"vkGetInstanceProcAddr", (void *)GetInstanceProcAddr},
    {"vk_layerGetPhysicalDeviceProcAddr", (void *)GetPhysicalDeviceProcAddr},
    {"vkGetDeviceProcAddr", (void *)GetDeviceProcAddr},
    {"vkCreateInstance", (void *)CreateInstance},
    {"vkCreateDevice", (void *)CreateDevice},
    {"vkEnumeratePhysicalDevices", (void *)EnumeratePhysicalDevices},
    {"vkGetPhysicalDeviceQueueFamilyProperties", (void *)GetPhysicalDeviceQueueFamilyProperties},
    {"vkDestroyInstance", (void *)DestroyInstance},
    {"vkEnumerateInstanceLayerProperties", (void *)EnumerateInstanceLayerProperties},
    {"vkEnumerateDeviceLayerProperties", (void *)EnumerateDeviceLayerProperties},
    {"vkEnumerateInstanceExtensionProperties", (void *)EnumerateInstanceExtensionProperties},
    {"vkEnumerateDeviceExtensionProperties", (void *)EnumerateDeviceExtensionProperties},
    {"vkCreateDescriptorUpdateTemplate", (void *)CreateDescriptorUpdateTemplate},
    {"vkCreateDescriptorUpdateTemplateKHR", (void *)CreateDescriptorUpdateTemplateKHR},
    {"vkDestroyDescriptorUpdateTemplate", (void *)DestroyDescriptorUpdateTemplate},
    {"vkDestroyDescriptorUpdateTemplateKHR", (void *)DestroyDescriptorUpdateTemplateKHR},
    {"vkUpdateDescriptorSetWithTemplate", (void *)UpdateDescriptorSetWithTemplate},
    {"vkUpdateDescriptorSetWithTemplateKHR", (void *)UpdateDescriptorSetWithTemplateKHR},
    {"vkCmdPushDescriptorSetWithTemplateKHR", (void *)CmdPushDescriptorSetWithTemplateKHR},
    {"vkCmdPushDescriptorSetKHR", (void *)CmdPushDescriptorSetKHR},
    {"vkCreateSwapchainKHR", (void *)CreateSwapchainKHR},
    {"vkDestroySwapchainKHR", (void *)DestroySwapchainKHR},
    {"vkGetSwapchainImagesKHR", (void *)GetSwapchainImagesKHR},
    {"vkAcquireNextImageKHR", (void *)AcquireNextImageKHR},
    {"vkAcquireNextImage2KHR", (void *)AcquireNextImage2KHR},
    {"vkQueuePresentKHR", (void *)QueuePresentKHR},
    {"vkQueueSubmit", (void *)QueueSubmit},
    {"vkWaitForFences", (void *)WaitForFences},
    {"vkGetFenceStatus", (void *)GetFenceStatus},
    {"vkQueueWaitIdle", (void *)QueueWaitIdle},
    {"vkDeviceWaitIdle", (void *)DeviceWaitIdle},
    {"vkGetDeviceQueue", (void *)GetDeviceQueue},
    {"vkGetDeviceQueue2", (void *)GetDeviceQueue2},
    {"vkDestroyDevice", (void *)DestroyDevice},
    {"vkDestroyFence", (void *)DestroyFence},
    {"vkResetFences", (void *)ResetFences},
    {"vkDestroySemaphore", (void *)DestroySemaphore},
    {"vkDestroyEvent", (void *)DestroyEvent},
    {"vkDestroyQueryPool", (void *)DestroyQueryPool},
    {"vkDestroyBuffer", (void *)DestroyBuffer},
    {"vkDestroyBufferView", (void *)DestroyBufferView},
    {"vkDestroyImage", (void *)DestroyImage},
    {"vkDestroyImageView", (void *)DestroyImageView},
    {"vkDestroyShaderModule", (void *)DestroyShaderModule},
    {"vkDestroyPipeline", (void *)DestroyPipeline},
    {"vkDestroyPipelineLayout", (void *)DestroyPipelineLayout},
    {"vkDestroySampler", (void *)DestroySampler},
    {"vkDestroyDescriptorSetLayout", (void *)DestroyDescriptorSetLayout},
    {"vkDestroyDescriptorPool", (void *)DestroyDescriptorPool},
    {"vkDestroyFramebuffer", (void *)DestroyFramebuffer},
    {"vkDestroyRenderPass", (void *)DestroyRenderPass},
    {"vkCreateBuffer", (void *)CreateBuffer},
    {"vkCreateBufferView", (void *)CreateBufferView},
    {"vkCreateImage", (void *)CreateImage},
    {"vkCreateImageView", (void *)CreateImageView},
    {"vkCreateFence", (void *)CreateFence},
    {"vkCreatePipelineCache", (void *)CreatePipelineCache},
    {"vkDestroyPipelineCache", (void *)DestroyPipelineCache},
    {"vkGetPipelineCacheData", (void *)GetPipelineCacheData},
    {"vkMergePipelineCaches", (void *)MergePipelineCaches},
    {"vkCreateGraphicsPipelines", (void *)CreateGraphicsPipelines},
    {"vkCreateComputePipelines", (void *)CreateComputePipelines},
    {"vkCreateSampler", (void *)CreateSampler},
    {"vkCreateDescriptorSetLayout", (void *)CreateDescriptorSetLayout},
    {"vkCreatePipelineLayout", (void *)CreatePipelineLayout},
    {"vkCreateDescriptorPool", (void *)CreateDescriptorPool},
    {"vkResetDescriptorPool", (void *)ResetDescriptorPool},
    {"vkAllocateDescriptorSets", (void *)AllocateDescriptorSets},
    {"vkFreeDescriptorSets", (void *)FreeDescriptorSets},
    {"vkUpdateDescriptorSets", (void *)UpdateDescriptorSets},
    {"vkCreateCommandPool", (void *)CreateCommandPool},
    {"vkDestroyCommandPool", (void *)DestroyCommandPool},
    {"vkResetCommandPool", (void *)ResetCommandPool},
    {"vkCreateQueryPool", (void *)CreateQueryPool},
    {"vkAllocateCommandBuffers", (void *)AllocateCommandBuffers},
    {"vkFreeCommandBuffers", (void *)FreeCommandBuffers},
    {"vkBeginCommandBuffer", (void *)BeginCommandBuffer},
    {"vkEndCommandBuffer", (void *)EndCommandBuffer},
    {"vkResetCommandBuffer", (void *)ResetCommandBuffer},
    {"vkCmdBindPipeline", (void *)CmdBindPipeline},
    {"vkCmdSetViewport", (void *)CmdSetViewport},
    {"vkCmdSetScissor", (void *)CmdSetScissor},
    {"vkCmdSetLineWidth", (void *)CmdSetLineWidth},
    {"vkCmdSetDepthBias", (void *)CmdSetDepthBias},
    {"vkCmdSetBlendConstants", (void *)CmdSetBlendConstants},
    {"vkCmdSetDepthBounds", (void *)CmdSetDepthBounds},
    {"vkCmdSetStencilCompareMask", (void *)CmdSetStencilCompareMask},
    {"vkCmdSetStencilWriteMask", (void *)CmdSetStencilWriteMask},
    {"vkCmdSetStencilReference", (void *)CmdSetStencilReference},
    {"vkCmdBindDescriptorSets", (void *)CmdBindDescriptorSets},
    {"vkCmdBindVertexBuffers", (void *)CmdBindVertexBuffers},
    {"vkCmdBindIndexBuffer", (void *)CmdBindIndexBuffer},
    {"vkCmdDraw", (void *)CmdDraw},
    {"vkCmdDrawIndexed", (void *)CmdDrawIndexed},
    {"vkCmdDrawIndirect", (void *)CmdDrawIndirect},
    {"vkCmdDrawIndexedIndirect", (void *)CmdDrawIndexedIndirect},
    {"vkCmdDispatch", (void *)CmdDispatch},
    {"vkCmdDispatchIndirect", (void *)CmdDispatchIndirect},
    {"vkCmdCopyBuffer", (void *)CmdCopyBuffer},
    {"vkCmdCopyImage", (void *)CmdCopyImage},
    {"vkCmdBlitImage", (void *)CmdBlitImage},
    {"vkCmdCopyBufferToImage", (void *)CmdCopyBufferToImage},
    {"vkCmdCopyImageToBuffer", (void *)CmdCopyImageToBuffer},
    {"vkCmdUpdateBuffer", (void *)CmdUpdateBuffer},
    {"vkCmdFillBuffer", (void *)CmdFillBuffer},
    {"vkCmdClearColorImage", (void *)CmdClearColorImage},
    {"vkCmdClearDepthStencilImage", (void *)CmdClearDepthStencilImage},
    {"vkCmdClearAttachments", (void *)CmdClearAttachments},
    {"vkCmdResolveImage", (void *)CmdResolveImage},
    {"vkGetImageSubresourceLayout", (void *)GetImageSubresourceLayout},
    {"vkCmdSetEvent", (void *)CmdSetEvent},
    {"vkCmdResetEvent", (void *)CmdResetEvent},
    {"vkCmdWaitEvents", (void *)CmdWaitEvents},
    {"vkCmdPipelineBarrier", (void *)CmdPipelineBarrier},
    {"vkCmdBeginQuery", (void *)CmdBeginQuery},
    {"vkCmdEndQuery", (void *)CmdEndQuery},
    {"vkCmdResetQueryPool", (void *)CmdResetQueryPool},
    {"vkCmdCopyQueryPoolResults", (void *)CmdCopyQueryPoolResults},
    {"vkCmdPushConstants", (void *)CmdPushConstants},
    {"vkCmdWriteTimestamp", (void *)CmdWriteTimestamp},
    {"vkCreateFramebuffer", (void *)CreateFramebuffer},
    {"vkCreateShaderModule", (void *)CreateShaderModule},
    {"vkCreateRenderPass", (void *)CreateRenderPass},
    {"vkCmdBeginRenderPass", (void *)CmdBeginRenderPass},
    {"vkCmdNextSubpass", (void *)CmdNextSubpass},
    {"vkCmdEndRenderPass", (void *)CmdEndRenderPass},
    {"vkCmdExecuteCommands", (void *)CmdExecuteCommands},
    {"vkCmdDebugMarkerBeginEXT", (void *)CmdDebugMarkerBeginEXT},
    {"vkCmdDebugMarkerEndEXT", (void *)CmdDebugMarkerEndEXT},
    {"vkCmdDebugMarkerInsertEXT", (void *)CmdDebugMarkerInsertEXT},
    {"vkDebugMarkerSetObjectNameEXT", (void *)DebugMarkerSetObjectNameEXT},
    {"vkDebugMarkerSetObjectTagEXT", (void *)DebugMarkerSetObjectTagEXT},
    {"vkSetEvent", (void *)SetEvent},
    {"vkMapMemory", (void *)MapMemory},
    {"vkUnmapMemory", (void *)UnmapMemory},
    {"vkFlushMappedMemoryRanges", (void *)FlushMappedMemoryRanges},
    {"vkInvalidateMappedMemoryRanges", (void *)InvalidateMappedMemoryRanges},
    {"vkAllocateMemory", (void *)AllocateMemory},
    {"vkFreeMemory", (void *)FreeMemory},
    {"vkBindBufferMemory", (void *)BindBufferMemory},
    {"vkBindBufferMemory2", (void *)BindBufferMemory2},
    {"vkBindBufferMemory2KHR", (void *)BindBufferMemory2KHR},
    {"vkGetBufferMemoryRequirements", (void *)GetBufferMemoryRequirements},
    {"vkGetBufferMemoryRequirements2", (void *)GetBufferMemoryRequirements2},
    {"vkGetBufferMemoryRequirements2KHR", (void *)GetBufferMemoryRequirements2KHR},
    {"vkGetImageMemoryRequirements", (void *)GetImageMemoryRequirements},
    {"vkGetImageMemoryRequirements2", (void *)GetImageMemoryRequirements2},
    {"vkGetImageMemoryRequirements2KHR", (void *)GetImageMemoryRequirements2KHR},
    {"vkGetImageSparseMemoryRequirements", (void *)GetImageSparseMemoryRequirements},
    {"vkGetImageSparseMemoryRequirements2", (void *)GetImageSparseMemoryRequirements2},
    {"vkGetImageSparseMemoryRequirements2KHR", (void *)GetImageSparseMemoryRequirements2KHR},
    {"vkGetPhysicalDeviceSparseImageFormatProperties", (void *)GetPhysicalDeviceSparseImageFormatProperties},
    {"vkGetPhysicalDeviceSparseImageFormatProperties2", (void *)GetPhysicalDeviceSparseImageFormatProperties2},
    {"vkGetPhysicalDeviceSparseImageFormatProperties2KHR", (void *)GetPhysicalDeviceSparseImageFormatProperties2KHR},
    {"vkGetQueryPoolResults", (void *)GetQueryPoolResults},
    {"vkBindImageMemory", (void *)BindImageMemory},
    {"vkBindImageMemory2", (void *)BindImageMemory2},
    {"vkBindImageMemory2KHR", (void *)BindImageMemory2KHR},
    {"vkQueueBindSparse", (void *)QueueBindSparse},
    {"vkCreateSemaphore", (void *)CreateSemaphore},
    {"vkCreateEvent", (void *)CreateEvent},
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    {"vkCreateAndroidSurfaceKHR", (void *)CreateAndroidSurfaceKHR},
#endif
#ifdef VK_USE_PLATFORM_MIR_KHR
    {"vkCreateMirSurfaceKHR", (void *)CreateMirSurfaceKHR},
    {"vkGetPhysicalDeviceMirPresentationSupportKHR", (void *)GetPhysicalDeviceMirPresentationSupportKHR},
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
    {"vkCreateWaylandSurfaceKHR", (void *)CreateWaylandSurfaceKHR},
    {"vkGetPhysicalDeviceWaylandPresentationSupportKHR", (void *)GetPhysicalDeviceWaylandPresentationSupportKHR},
#endif
#ifdef VK_USE_PLATFORM_WIN32_KHR
    {"vkCreateWin32SurfaceKHR", (void *)CreateWin32SurfaceKHR},
    {"vkGetPhysicalDeviceWin32PresentationSupportKHR", (void *)GetPhysicalDeviceWin32PresentationSupportKHR},
    {"vkImportSemaphoreWin32HandleKHR", (void *)ImportSemaphoreWin32HandleKHR},
    {"vkGetSemaphoreWin32HandleKHR", (void *)GetSemaphoreWin32HandleKHR},
    {"vkImportFenceWin32HandleKHR", (void *)ImportFenceWin32HandleKHR},
    {"vkGetFenceWin32HandleKHR", (void *)GetFenceWin32HandleKHR},
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
    {"vkCreateXcbSurfaceKHR", (void *)CreateXcbSurfaceKHR},
    {"vkGetPhysicalDeviceXcbPresentationSupportKHR", (void *)GetPhysicalDeviceXcbPresentationSupportKHR},
#endif
#ifdef VK_USE_PLATFORM_XLIB_KHR
    {"vkCreateXlibSurfaceKHR", (void *)CreateXlibSurfaceKHR},
    {"vkGetPhysicalDeviceXlibPresentationSupportKHR", (void *)GetPhysicalDeviceXlibPresentationSupportKHR},
#endif
#ifdef VK_USE_PLATFORM_IOS_MVK
    {"vkCreateIOSSurfaceMVK", (void *)CreateIOSSurfaceMVK},
#endif
#ifdef VK_USE_PLATFORM_MACOS_MVK
    {"vkCreateMacOSSurfaceMVK", (void *)CreateMacOSSurfaceMVK},
#endif
    {"vkCreateDisplayPlaneSurfaceKHR", (void *)CreateDisplayPlaneSurfaceKHR},
    {"vkDestroySurfaceKHR", (void *)DestroySurfaceKHR},
    {"vkGetPhysicalDeviceSurfaceCapabilitiesKHR", (void *)GetPhysicalDeviceSurfaceCapabilitiesKHR},
    {"vkGetPhysicalDeviceSurfaceCapabilities2KHR", (void *)GetPhysicalDeviceSurfaceCapabilities2KHR},
    {"vkGetPhysicalDeviceSurfaceCapabilities2EXT", (void *)GetPhysicalDeviceSurfaceCapabilities2EXT},
    {"vkGetPhysicalDeviceSurfaceSupportKHR", (void *)GetPhysicalDeviceSurfaceSupportKHR},
    {"vkGetPhysicalDeviceSurfacePresentModesKHR", (void *)GetPhysicalDeviceSurfacePresentModesKHR},
    {"vkGetPhysicalDeviceSurfaceFormatsKHR", (void *)GetPhysicalDeviceSurfaceFormatsKHR},
    {"vkGetPhysicalDeviceSurfaceFormats2KHR", (void *)GetPhysicalDeviceSurfaceFormats2KHR},
    {"vkGetPhysicalDeviceQueueFamilyProperties2", (void *)GetPhysicalDeviceQueueFamilyProperties2},
    {"vkGetPhysicalDeviceQueueFamilyProperties2KHR", (void *)GetPhysicalDeviceQueueFamilyProperties2KHR},
    {"vkEnumeratePhysicalDeviceGroups", (void *)EnumeratePhysicalDeviceGroups},
    {"vkEnumeratePhysicalDeviceGroupsKHR", (void *)EnumeratePhysicalDeviceGroupsKHR},
    {"vkCreateDebugReportCallbackEXT", (void *)CreateDebugReportCallbackEXT},
    {"vkDestroyDebugReportCallbackEXT", (void *)DestroyDebugReportCallbackEXT},
    {"vkDebugReportMessageEXT", (void *)DebugReportMessageEXT},
    {"vkGetPhysicalDeviceDisplayPlanePropertiesKHR", (void *)GetPhysicalDeviceDisplayPlanePropertiesKHR},
    {"vkGetPhysicalDeviceDisplayPlaneProperties2KHR", (void *)GetPhysicalDeviceDisplayPlaneProperties2KHR},
    {"vkGetDisplayPlaneSupportedDisplaysKHR", (void *)GetDisplayPlaneSupportedDisplaysKHR},
    {"vkGetDisplayPlaneCapabilitiesKHR", (void *)GetDisplayPlaneCapabilitiesKHR},
    {"vkGetDisplayPlaneCapabilities2KHR", (void *)GetDisplayPlaneCapabilities2KHR},
    {"vkImportSemaphoreFdKHR", (void *)ImportSemaphoreFdKHR},
    {"vkGetSemaphoreFdKHR", (void *)GetSemaphoreFdKHR},
    {"vkImportFenceFdKHR", (void *)ImportFenceFdKHR},
    {"vkGetFenceFdKHR", (void *)GetFenceFdKHR},
    {"vkCreateValidationCacheEXT", (void *)CreateValidationCacheEXT},
    {"vkDestroyValidationCacheEXT", (void *)DestroyValidationCacheEXT},
    {"vkGetValidationCacheDataEXT", (void *)GetValidationCacheDataEXT},
    {"vkMergeValidationCachesEXT", (void *)MergeValidationCachesEXT},
    {"vkCmdSetDiscardRectangleEXT", (void *)CmdSetDiscardRectangleEXT},
    {"vkCmdSetSampleLocationsEXT", (void *)CmdSetSampleLocationsEXT},
    {"vkSetDebugUtilsObjectNameEXT", (void *)SetDebugUtilsObjectNameEXT},
    {"vkSetDebugUtilsObjectTagEXT", (void *)SetDebugUtilsObjectTagEXT},
    {"vkQueueBeginDebugUtilsLabelEXT", (void *)QueueBeginDebugUtilsLabelEXT},
    {"vkQueueEndDebugUtilsLabelEXT", (void *)QueueEndDebugUtilsLabelEXT},
    {"vkQueueInsertDebugUtilsLabelEXT", (void *)QueueInsertDebugUtilsLabelEXT},
    {"vkCmdBeginDebugUtilsLabelEXT", (void *)CmdBeginDebugUtilsLabelEXT},
    {"vkCmdEndDebugUtilsLabelEXT", (void *)CmdEndDebugUtilsLabelEXT},
    {"vkCmdInsertDebugUtilsLabelEXT", (void *)CmdInsertDebugUtilsLabelEXT},
    {"vkCreateDebugUtilsMessengerEXT", (void *)CreateDebugUtilsMessengerEXT},
    {"vkDestroyDebugUtilsMessengerEXT", (void *)DestroyDebugUtilsMessengerEXT},
    {"vkSubmitDebugUtilsMessageEXT", (void *)SubmitDebugUtilsMessageEXT},
    {"vkCmdDrawIndirectCountKHR", (void *)CmdDrawIndirectCountKHR},
    {"vkCmdDrawIndexedIndirectCountKHR", (void *)CmdDrawIndexedIndirectCountKHR},
    {"vkCmdSetExclusiveScissorNV", (void *)CmdSetExclusiveScissorNV},
    {"vkCmdBindShadingRateImageNV", (void *)CmdBindShadingRateImageNV},
    {"vkCmdSetViewportShadingRatePaletteNV", (void *)CmdSetViewportShadingRatePaletteNV},
    {"vkCmdDrawMeshTasksNV", (void *)CmdDrawMeshTasksNV},
    {"vkCmdDrawMeshTasksIndirectNV", (void *)CmdDrawMeshTasksIndirectNV},
    {"vkCmdDrawMeshTasksIndirectCountNV", (void *)CmdDrawMeshTasksIndirectCountNV},
    {"vkCreateRaytracingPipelinesNVX", (void *)CreateRaytracingPipelinesNVX},
};
// Device-level proc lookup. Returns the layer's interception function when the
// name is in name_to_funcptr_map, otherwise chains to the next layer/driver.
// Names belonging to extensions not enabled on this device resolve to nullptr.
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName) {
    assert(device);
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (!ApiParentExtensionEnabled(funcName, device_data->extensions.device_extension_set)) {
        return nullptr;
    }
    // Intercepted by this layer?
    const auto hit = name_to_funcptr_map.find(funcName);
    if (hit != name_to_funcptr_map.end()) return reinterpret_cast<PFN_vkVoidFunction>(hit->second);
    // Not ours -- pass the request down the chain.
    const auto &table = device_data->dispatch_table;
    return table.GetDeviceProcAddr ? table.GetDeviceProcAddr(device, funcName) : nullptr;
}
// Instance-level proc lookup: intercepted names resolve to this layer's
// functions; anything else is forwarded down the instance dispatch chain.
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
    // Intercepted by this layer?
    const auto hit = name_to_funcptr_map.find(funcName);
    if (hit != name_to_funcptr_map.end()) return reinterpret_cast<PFN_vkVoidFunction>(hit->second);
    // Not ours -- pass the request down the chain.
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    const auto &table = instance_data->dispatch_table;
    return table.GetInstanceProcAddr ? table.GetInstanceProcAddr(instance, funcName) : nullptr;
}
// Physical-device proc lookup: this layer intercepts nothing at this level,
// so the request always goes straight down the chain.
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName) {
    assert(instance);
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    const auto &table = instance_data->dispatch_table;
    return table.GetPhysicalDeviceProcAddr ? table.GetPhysicalDeviceProcAddr(instance, funcName) : nullptr;
}
} // namespace core_validation
// loader-layer interface v0: thin exported wrappers around the
// core_validation namespace, since this library contains a single layer.
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
                                                                                     VkExtensionProperties *pProperties) {
    return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
}
// Exported wrapper: forwards to the layer's implementation.
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pCount,
                                                                                  VkLayerProperties *pProperties) {
    return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties);
}
// Exported wrapper: the layer implementation handles VK_NULL_HANDLE itself,
// so the loader is expected to pass a null physical device here.
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
                                                                                 VkLayerProperties *pProperties) {
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
}
// Exported wrapper: the layer implementation handles VK_NULL_HANDLE itself,
// so the loader is expected to pass a null physical device here.
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                                    const char *pLayerName, uint32_t *pCount,
                                                                                    VkExtensionProperties *pProperties) {
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
}
// Exported wrapper: forwards device proc-address queries into the layer.
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
    return core_validation::GetDeviceProcAddr(dev, funcName);
}
// Exported wrapper: forwards instance proc-address queries into the layer.
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    return core_validation::GetInstanceProcAddr(instance, funcName);
}
// Exported wrapper: forwards physical-device proc-address queries into the layer.
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_layerGetPhysicalDeviceProcAddr(VkInstance instance,
                                                                                           const char *funcName) {
    return core_validation::GetPhysicalDeviceProcAddr(instance, funcName);
}
// Negotiates the loader/layer interface version. Fills in the dispatch entry
// points when the loader's structure version can hold them (>= 2), then either
// adopts the loader's (older) version or clamps the loader down to ours.
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct) {
    assert(pVersionStruct != NULL);
    assert(pVersionStruct->sType == LAYER_NEGOTIATE_INTERFACE_STRUCT);
    const uint32_t loader_version = pVersionStruct->loaderLayerInterfaceVersion;
    // Version 2+ structures carry the function pointers.
    if (loader_version >= 2) {
        pVersionStruct->pfnGetInstanceProcAddr = vkGetInstanceProcAddr;
        pVersionStruct->pfnGetDeviceProcAddr = vkGetDeviceProcAddr;
        pVersionStruct->pfnGetPhysicalDeviceProcAddr = vk_layerGetPhysicalDeviceProcAddr;
    }
    if (loader_version < CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
        // Older loader: remember its version so we speak its dialect.
        core_validation::loader_layer_if_version = loader_version;
    } else if (loader_version > CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
        // Newer loader: tell it to drop down to the version we support.
        pVersionStruct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
    }
    return VK_SUCCESS;
}
| 1 | 9,219 | What this does is narrow down what we are saying the descriptor set is requiring. This is a good thing, but doesn't go far enough, we are still potentially setting unused bindings to either a required state or an invalid one. set_binding_pair.second is a map of the binding/requirements from the bound shaders... and instead of marking all bindings, looking at that will mark only the used ones. Will open a new PR | KhronosGroup-Vulkan-ValidationLayers | cpp |
@@ -1038,11 +1038,10 @@ func (core *coreService) BlockMetas(start uint64, count uint64) ([]*iotextypes.B
var (
tipHeight = core.bc.TipHeight()
- res []*iotextypes.BlockMeta
+ res = make([]*iotextypes.BlockMeta, 0)
)
if start > tipHeight {
- log.L().Debug("err in BlockMetas()", zap.Error(status.Error(codes.InvalidArgument, "start height should not exceed tip height")))
- return nil, nil
+ return nil, status.Error(codes.NotFound, "start height should not exceed tip height")
}
for height := start; height <= tipHeight && count > 0; height++ {
blockMeta, err := core.getBlockMetaByHeight(height) | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package api
import (
"bytes"
"context"
"encoding/hex"
"fmt"
"math"
"math/big"
"strconv"
"time"
"github.com/golang/protobuf/ptypes"
"github.com/pkg/errors"
"go.uber.org/zap"
"google.golang.org/genproto/googleapis/rpc/errdetails"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
"github.com/iotexproject/go-pkgs/hash"
"github.com/iotexproject/iotex-address/address"
"github.com/iotexproject/iotex-election/committee"
"github.com/iotexproject/iotex-proto/golang/iotexapi"
"github.com/iotexproject/iotex-proto/golang/iotextypes"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util"
"github.com/iotexproject/iotex-core/action/protocol/execution/evm"
"github.com/iotexproject/iotex-core/action/protocol/poll"
"github.com/iotexproject/iotex-core/action/protocol/rolldpos"
"github.com/iotexproject/iotex-core/actpool"
logfilter "github.com/iotexproject/iotex-core/api/logfilter"
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/blockchain/block"
"github.com/iotexproject/iotex-core/blockchain/blockdao"
"github.com/iotexproject/iotex-core/blockchain/filedao"
"github.com/iotexproject/iotex-core/blockchain/genesis"
"github.com/iotexproject/iotex-core/blockindex"
"github.com/iotexproject/iotex-core/blocksync"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/db"
"github.com/iotexproject/iotex-core/gasstation"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/pkg/tracer"
"github.com/iotexproject/iotex-core/pkg/version"
"github.com/iotexproject/iotex-core/state"
"github.com/iotexproject/iotex-core/state/factory"
)
// coreService provides api for user to interact with blockchain data
type coreService struct {
	bc                blockchain.Blockchain     // canonical chain access (tip height, headers)
	bs                blocksync.BlockSync       // block synchronizer; may be nil (see ChainMeta)
	sf                factory.Factory           // state factory for account/contract state reads
	dao               blockdao.BlockDAO
	indexer           blockindex.Indexer        // action indexer; nil when gateway indexing is off
	bfIndexer         blockindex.BloomFilterIndexer
	ap                actpool.ActPool           // pending-action pool (pending nonce lookups)
	gs                *gasstation.GasStation
	broadcastHandler  BroadcastOutbound         // agent to broadcast action to the network
	cfg               config.Config
	registry          *protocol.Registry
	chainListener     Listener                  // listener for chain events, capacity set in newCoreService
	hasActionIndex    bool                      // true when the gateway plugin is enabled
	electionCommittee committee.Committee
	readCache         *ReadCache                // cache for read-path responses
}
// newCoreService creates a api server that contains major blockchain components.
// Options are applied first to populate the optional broadcast handler and
// election committee; the API config falls back to defaults when unset, and
// the range-query limit is validated against the TPS window before wiring up
// the service. The gateway plugin toggles action-index availability.
func newCoreService(
	cfg config.Config,
	chain blockchain.Blockchain,
	bs blocksync.BlockSync,
	sf factory.Factory,
	dao blockdao.BlockDAO,
	indexer blockindex.Indexer,
	bfIndexer blockindex.BloomFilterIndexer,
	actPool actpool.ActPool,
	registry *protocol.Registry,
	opts ...Option,
) (*coreService, error) {
	// Apply functional options; any option error aborts construction.
	apiCfg := Config{}
	for _, opt := range opts {
		if err := opt(&apiCfg); err != nil {
			return nil, err
		}
	}
	// Fall back to the default API config when none was provided.
	if cfg.API == (config.API{}) {
		log.L().Warn("API server is not configured.")
		cfg.API = config.Default.API
	}
	// A range query must at least cover one TPS window.
	if cfg.API.RangeQueryLimit < uint64(cfg.API.TpsWindow) {
		return nil, errors.New("range query upper limit cannot be less than tps window")
	}
	svr := &coreService{
		bc:               chain,
		bs:               bs,
		sf:               sf,
		dao:              dao,
		indexer:          indexer,
		bfIndexer:        bfIndexer,
		ap:               actPool,
		broadcastHandler: apiCfg.broadcastHandler,
		cfg:              cfg,
		registry:         registry,
		chainListener:    NewChainListener(500), // buffered listener capacity
		gs:               gasstation.NewGasStation(chain, sf.SimulateExecution, dao, cfg.API),
		electionCommittee: apiCfg.electionCommittee,
		readCache:        NewReadCache(),
	}
	// Action index is only available when the gateway plugin is enabled.
	if _, ok := cfg.Plugins[config.GatewayPlugin]; ok {
		svr.hasActionIndex = true
	}
	return svr, nil
}
// Account returns the metadata of an account together with the block
// identifier (hash + height) at which the state was read.
// Special-cased: the rewarding pool and staking bucket pool addresses are
// served by the protocol-account path instead of the state factory.
// Errors are mapped to gRPC codes: NotFound for missing state/indexer/block,
// Internal for actpool failures.
func (core *coreService) Account(addr address.Address) (*iotextypes.AccountMeta, *iotextypes.BlockIdentifier, error) {
	addrStr := addr.String()
	// Protocol-owned pool addresses have no regular account state.
	if addrStr == address.RewardingPoolAddr || addrStr == address.StakingBucketPoolAddr {
		return core.getProtocolAccount(context.Background(), addrStr)
	}
	// Read account state plus the height it was observed at.
	state, tipHeight, err := accountutil.AccountStateWithHeight(core.sf, addr)
	if err != nil {
		return nil, nil, status.Error(codes.NotFound, err.Error())
	}
	pendingNonce, err := core.ap.GetPendingNonce(addrStr)
	if err != nil {
		return nil, nil, status.Error(codes.Internal, err.Error())
	}
	// Action count requires the indexer (gateway mode only).
	if core.indexer == nil {
		return nil, nil, status.Error(codes.NotFound, blockindex.ErrActionIndexNA.Error())
	}
	numActions, err := core.indexer.GetActionCountByAddress(hash.BytesToHash160(addr.Bytes()))
	if err != nil {
		return nil, nil, status.Error(codes.NotFound, err.Error())
	}
	accountMeta := &iotextypes.AccountMeta{
		Address:      addrStr,
		Balance:      state.Balance.String(),
		Nonce:        state.Nonce,
		PendingNonce: pendingNonce,
		NumActions:   numActions,
		IsContract:   state.IsContract(),
	}
	// For contracts, also return the deployed byte code.
	if state.IsContract() {
		var code evm.SerializableBytes
		_, err = core.sf.State(&code, protocol.NamespaceOption(evm.CodeKVNameSpace), protocol.KeyOption(state.CodeHash))
		if err != nil {
			return nil, nil, status.Error(codes.NotFound, err.Error())
		}
		accountMeta.ContractByteCode = code
	}
	// Identify the block (at the observed height) the state belongs to.
	header, err := core.bc.BlockHeaderByHeight(tipHeight)
	if err != nil {
		return nil, nil, status.Error(codes.NotFound, err.Error())
	}
	hash := header.HashBlock()
	return accountMeta, &iotextypes.BlockIdentifier{
		Hash:   hex.EncodeToString(hash[:]),
		Height: tipHeight,
	}, nil
}
// ChainMeta returns blockchain metadata (height, action totals, TPS, and
// current epoch data) plus the block-sync status string.
func (core *coreService) ChainMeta() (*iotextypes.ChainMeta, string, error) {
	tipHeight := core.bc.TipHeight()
	// an empty chain has no meaningful stats
	if tipHeight == 0 {
		return &iotextypes.ChainMeta{
			Epoch:   &iotextypes.EpochData{},
			ChainID: core.bc.ChainID(),
		}, "", nil
	}
	syncStatus := ""
	if core.bs != nil {
		syncStatus = core.bs.SyncStatus()
	}
	chainMeta := &iotextypes.ChainMeta{
		Height:  tipHeight,
		ChainID: core.bc.ChainID(),
	}
	// without the indexer, action totals and TPS cannot be computed
	if core.indexer == nil {
		return chainMeta, syncStatus, nil
	}
	totalActions, err := core.indexer.GetTotalActions()
	if err != nil {
		return nil, "", status.Error(codes.Internal, err.Error())
	}
	blockLimit := int64(core.cfg.API.TpsWindow)
	if blockLimit <= 0 {
		return nil, "", status.Errorf(codes.Internal, "block limit is %d", blockLimit)
	}
	// avoid genesis block
	if int64(tipHeight) < blockLimit {
		blockLimit = int64(tipHeight)
	}
	blks, err := core.BlockMetas(tipHeight-uint64(blockLimit)+1, uint64(blockLimit))
	if err != nil {
		return nil, "", status.Error(codes.NotFound, err.Error())
	}
	if len(blks) == 0 {
		return nil, "", status.Error(codes.NotFound, "get 0 blocks! not able to calculate aps")
	}
	var numActions int64
	for _, blk := range blks {
		numActions += blk.NumActions
	}
	t1 := time.Unix(blks[0].Timestamp.GetSeconds(), int64(blks[0].Timestamp.GetNanos()))
	t2 := time.Unix(blks[len(blks)-1].Timestamp.GetSeconds(), int64(blks[len(blks)-1].Timestamp.GetNanos()))
	// duration of time difference in milli-seconds
	// TODO: use config.Genesis.BlockInterval after PR1289 merges
	// the +10s pads the window so a single-block sample does not divide by zero
	timeDiff := (t2.Sub(t1) + 10*time.Second) / time.Millisecond
	tps := float32(numActions*1000) / float32(timeDiff)
	chainMeta.NumActions = int64(totalActions)
	chainMeta.Tps = int64(math.Ceil(float64(tps)))
	chainMeta.TpsFloat = tps
	rp := rolldpos.FindProtocol(core.registry)
	if rp != nil {
		epochNum := rp.GetEpochNum(tipHeight)
		epochHeight := rp.GetEpochHeight(epochNum)
		gravityChainStartHeight, err := core.getGravityChainStartHeight(epochHeight)
		if err != nil {
			return nil, "", status.Error(codes.NotFound, err.Error())
		}
		chainMeta.Epoch = &iotextypes.EpochData{
			Num:                     epochNum,
			Height:                  epochHeight,
			GravityChainStartHeight: gravityChainStartHeight,
		}
	}
	return chainMeta, syncStatus, nil
}
// ServerMeta reports the build/version metadata compiled into this binary.
func (core *coreService) ServerMeta() (packageVersion string, packageCommitID string, gitStatus string, goVersion string, buildTime string) {
	return version.PackageVersion,
		version.PackageCommitID,
		version.GitStatus,
		version.GoVersion,
		version.BuildTime
}
// SendAction validates a signed action, adds it to the local actpool, and
// broadcasts it to the network. It returns the hex-encoded action hash.
func (core *coreService) SendAction(ctx context.Context, in *iotextypes.Action) (string, error) {
	log.L().Debug("receive send action request")
	var selp action.SealedEnvelope
	if err := selp.LoadProto(in); err != nil {
		return "", status.Error(codes.InvalidArgument, err.Error())
	}
	// reject action if chainID is not matched at KamchatkaHeight
	if core.cfg.Genesis.Blockchain.IsToBeEnabled(core.bc.TipHeight()) {
		if core.bc.ChainID() != in.GetCore().GetChainID() {
			return "", status.Errorf(codes.InvalidArgument, "ChainID does not match, expecting %d, got %d", core.bc.ChainID(), in.GetCore().GetChainID())
		}
	}
	// Add to local actpool
	ctx = protocol.WithRegistry(ctx, core.registry)
	hash, err := selp.Hash()
	if err != nil {
		return "", err
	}
	l := log.L().With(zap.String("actionHash", hex.EncodeToString(hash[:])))
	if err = core.ap.Add(ctx, selp); err != nil {
		txBytes, serErr := proto.Marshal(in)
		if serErr != nil {
			l.Error("Data corruption", zap.Error(serErr))
		} else {
			l.With(zap.String("txBytes", hex.EncodeToString(txBytes))).Error("Failed to accept action", zap.Error(err))
		}
		// surface the rejection reason to the caller as structured gRPC error details
		errMsg := core.cfg.ProducerAddress().String() + ": " + err.Error()
		st := status.New(codes.Internal, errMsg)
		br := &errdetails.BadRequest{
			FieldViolations: []*errdetails.BadRequest_FieldViolation{
				{
					Field:       "Action rejected",
					Description: action.LoadErrorDescription(err),
				},
			},
		}
		st, err := st.WithDetails(br)
		if err != nil {
			log.S().Panicf("Unexpected error attaching metadata: %v", err)
		}
		return "", st.Err()
	}
	// If there is no error putting into local actpool,
	// Broadcast it to the network
	// broadcast failure is non-fatal: the action was already accepted locally
	if err = core.broadcastHandler(ctx, core.bc.ChainID(), in); err != nil {
		l.Warn("Failed to broadcast SendAction request.", zap.Error(err))
	}
	return hex.EncodeToString(hash[:]), nil
}
// ReceiptByAction looks up the receipt for the given action hash along with
// the hex-encoded hash of the block that contains the action.
func (core *coreService) ReceiptByAction(actHash hash.Hash256) (*action.Receipt, string, error) {
	if core.indexer == nil || !core.hasActionIndex {
		return nil, "", status.Error(codes.NotFound, blockindex.ErrActionIndexNA.Error())
	}
	receipt, err := core.ReceiptByActionHash(actHash)
	if err != nil {
		return nil, "", status.Error(codes.NotFound, err.Error())
	}
	blkHash, err := core.getBlockHashByActionHash(actHash)
	if err != nil {
		return nil, "", status.Error(codes.NotFound, err.Error())
	}
	return receipt, hex.EncodeToString(blkHash[:]), nil
}
// ReadContract simulates a read-only contract call and returns the
// hex-encoded return data plus a synthesized success receipt. Results are
// memoized in the read cache keyed by (contract, calldata).
func (core *coreService) ReadContract(ctx context.Context, in *iotextypes.Execution, callerAddr address.Address, gasLimit uint64) (string, *iotextypes.Receipt, error) {
	log.L().Debug("receive read smart contract request")
	sc := &action.Execution{}
	if err := sc.LoadProto(in); err != nil {
		return "", nil, status.Error(codes.InvalidArgument, err.Error())
	}
	key := hash.Hash160b(append([]byte(sc.Contract()), sc.Data()...))
	// TODO: either moving readcache into the upper layer or change the storage format
	if d, ok := core.readCache.Get(key); ok {
		res := iotexapi.ReadContractResponse{}
		if err := proto.Unmarshal(d, &res); err == nil {
			return res.Data, res.Receipt, nil
		}
		// on unmarshal failure, fall through and re-simulate
	}
	state, err := accountutil.AccountState(core.sf, callerAddr)
	if err != nil {
		return "", nil, status.Error(codes.InvalidArgument, err.Error())
	}
	if ctx, err = core.bc.Context(ctx); err != nil {
		return "", nil, err
	}
	// clamp the gas limit to the block gas limit; 0 means "use the maximum"
	if gasLimit == 0 || core.cfg.Genesis.BlockGasLimit < gasLimit {
		gasLimit = core.cfg.Genesis.BlockGasLimit
	}
	sc, _ = action.NewExecution(
		sc.Contract(),
		state.Nonce+1,
		sc.Amount(),
		gasLimit,
		big.NewInt(0), // ReadContract() is read-only, use 0 to prevent insufficient gas
		sc.Data(),
	)
	retval, receipt, err := core.sf.SimulateExecution(ctx, callerAddr, sc, core.dao.GetBlockHash)
	if err != nil {
		return "", nil, status.Error(codes.Internal, err.Error())
	}
	// ReadContract() is read-only, if no error returned, we consider it a success
	receipt.Status = uint64(iotextypes.ReceiptStatus_Success)
	res := iotexapi.ReadContractResponse{
		Data:    hex.EncodeToString(retval),
		Receipt: receipt.ConvertToReceiptPb(),
	}
	// cache the serialized response; a marshal failure only skips caching
	if d, err := proto.Marshal(&res); err == nil {
		core.readCache.Put(key, d)
	}
	return res.Data, res.Receipt, nil
}
// ReadState executes a read-only state query against the protocol registered
// under protocolID and attaches the block identifier the state was read at.
func (core *coreService) ReadState(protocolID string, height string, methodName []byte, arguments [][]byte) (*iotexapi.ReadStateResponse, error) {
	p, ok := core.registry.Find(protocolID)
	if !ok {
		return nil, status.Errorf(codes.Internal, "protocol %s isn't registered", protocolID)
	}
	data, readStateHeight, err := core.readState(context.Background(), p, height, methodName, arguments...)
	if err != nil {
		return nil, status.Error(codes.NotFound, err.Error())
	}
	blkHash, err := core.dao.GetBlockHash(readStateHeight)
	if err != nil {
		if errors.Cause(err) == db.ErrNotExist {
			return nil, status.Error(codes.NotFound, err.Error())
		}
		return nil, status.Error(codes.Internal, err.Error())
	}
	identifier := &iotextypes.BlockIdentifier{
		Height: readStateHeight,
		Hash:   hex.EncodeToString(blkHash[:]),
	}
	return &iotexapi.ReadStateResponse{
		Data:            data,
		BlockIdentifier: identifier,
	}, nil
}
// SuggestGasPrice delegates gas-price suggestion to the gas station.
func (core *coreService) SuggestGasPrice() (uint64, error) {
	return core.gs.SuggestGasPrice()
}
// EstimateGasForAction delegates gas estimation for a signed action to the
// gas station, mapping any failure to an Internal gRPC status.
func (core *coreService) EstimateGasForAction(in *iotextypes.Action) (uint64, error) {
	estimate, err := core.gs.EstimateGasForAction(in)
	if err == nil {
		return estimate, nil
	}
	return 0, status.Error(codes.Internal, err.Error())
}
// EstimateActionGasConsumption estimates the gas an unsigned action would
// consume: executions are simulated, while every other action type is priced
// as base intrinsic gas plus a per-byte payload cost.
func (core *coreService) EstimateActionGasConsumption(ctx context.Context, in *iotexapi.EstimateActionGasConsumptionRequest) (uint64, error) {
	var ret uint64
	// TODO: refactor gas estimation code out of core service
	switch {
	case in.GetExecution() != nil:
		request := in.GetExecution()
		return core.estimateActionGasConsumptionForExecution(ctx, request, in.GetCallerAddress())
	case in.GetTransfer() != nil:
		ret = uint64(len(in.GetTransfer().Payload))*action.TransferPayloadGas + action.TransferBaseIntrinsicGas
	case in.GetStakeCreate() != nil:
		ret = uint64(len(in.GetStakeCreate().Payload))*action.CreateStakePayloadGas + action.CreateStakeBaseIntrinsicGas
	case in.GetStakeUnstake() != nil:
		ret = uint64(len(in.GetStakeUnstake().Payload))*action.ReclaimStakePayloadGas + action.ReclaimStakeBaseIntrinsicGas
	case in.GetStakeWithdraw() != nil:
		ret = uint64(len(in.GetStakeWithdraw().Payload))*action.ReclaimStakePayloadGas + action.ReclaimStakeBaseIntrinsicGas
	case in.GetStakeAddDeposit() != nil:
		ret = uint64(len(in.GetStakeAddDeposit().Payload))*action.DepositToStakePayloadGas + action.DepositToStakeBaseIntrinsicGas
	case in.GetStakeRestake() != nil:
		ret = uint64(len(in.GetStakeRestake().Payload))*action.RestakePayloadGas + action.RestakeBaseIntrinsicGas
	case in.GetStakeChangeCandidate() != nil:
		ret = uint64(len(in.GetStakeChangeCandidate().Payload))*action.MoveStakePayloadGas + action.MoveStakeBaseIntrinsicGas
	case in.GetStakeTransferOwnership() != nil:
		ret = uint64(len(in.GetStakeTransferOwnership().Payload))*action.MoveStakePayloadGas + action.MoveStakeBaseIntrinsicGas
	case in.GetCandidateRegister() != nil:
		ret = uint64(len(in.GetCandidateRegister().Payload))*action.CandidateRegisterPayloadGas + action.CandidateRegisterBaseIntrinsicGas
	case in.GetCandidateUpdate() != nil:
		// CandidateUpdate has no payload, so only the base intrinsic gas applies
		ret = action.CandidateUpdateBaseIntrinsicGas
	default:
		return 0, status.Error(codes.InvalidArgument, "invalid argument")
	}
	return ret, nil
}
// EpochMeta returns the epoch data, the number of blocks produced in the
// epoch so far, and per-producer production info for the given epoch number.
func (core *coreService) EpochMeta(epochNum uint64) (*iotextypes.EpochData, uint64, []*iotexapi.BlockProducerInfo, error) {
	rp := rolldpos.FindProtocol(core.registry)
	// without roll-DPoS there is no notion of epochs; return empty results
	if rp == nil {
		return nil, 0, nil, nil
	}
	if epochNum < 1 {
		return nil, 0, nil, status.Error(codes.InvalidArgument, "epoch number cannot be less than one")
	}
	epochHeight := rp.GetEpochHeight(epochNum)
	gravityChainStartHeight, err := core.getGravityChainStartHeight(epochHeight)
	if err != nil {
		return nil, 0, nil, status.Error(codes.NotFound, err.Error())
	}
	epochData := &iotextypes.EpochData{
		Num:                     epochNum,
		Height:                  epochHeight,
		GravityChainStartHeight: gravityChainStartHeight,
	}
	pp := poll.FindProtocol(core.registry)
	if pp == nil {
		return nil, 0, nil, status.Error(codes.Internal, "poll protocol is not registered")
	}
	// read the active block producers for the epoch from the poll protocol
	methodName := []byte("ActiveBlockProducersByEpoch")
	arguments := [][]byte{[]byte(strconv.FormatUint(epochNum, 10))}
	height := strconv.FormatUint(epochHeight, 10)
	data, _, err := core.readState(context.Background(), pp, height, methodName, arguments...)
	if err != nil {
		return nil, 0, nil, status.Error(codes.NotFound, err.Error())
	}
	var activeConsensusBlockProducers state.CandidateList
	if err := activeConsensusBlockProducers.Deserialize(data); err != nil {
		return nil, 0, nil, status.Error(codes.Internal, err.Error())
	}
	numBlks, produce, err := core.getProductivityByEpoch(rp, epochNum, core.bc.TipHeight(), activeConsensusBlockProducers)
	if err != nil {
		return nil, 0, nil, status.Error(codes.NotFound, err.Error())
	}
	// read the full producer list (active producers are a subset of it)
	methodName = []byte("BlockProducersByEpoch")
	data, _, err = core.readState(context.Background(), pp, height, methodName, arguments...)
	if err != nil {
		return nil, 0, nil, status.Error(codes.NotFound, err.Error())
	}
	var BlockProducers state.CandidateList
	if err := BlockProducers.Deserialize(data); err != nil {
		return nil, 0, nil, status.Error(codes.Internal, err.Error())
	}
	var blockProducersInfo []*iotexapi.BlockProducerInfo
	for _, bp := range BlockProducers {
		var active bool
		var blockProduction uint64
		// producers present in the productivity map are the active ones
		if production, ok := produce[bp.Address]; ok {
			active = true
			blockProduction = production
		}
		blockProducersInfo = append(blockProducersInfo, &iotexapi.BlockProducerInfo{
			Address:    bp.Address,
			Votes:      bp.Votes.String(),
			Active:     active,
			Production: blockProduction,
		})
	}
	return epochData, numBlks, blockProducersInfo, nil
}
// RawBlocks returns raw block data for up to count blocks starting at
// startHeight, optionally including receipts and transaction logs.
func (core *coreService) RawBlocks(startHeight uint64, count uint64, withReceipts bool, withTransactionLogs bool) ([]*iotexapi.BlockInfo, error) {
	if count == 0 || count > core.cfg.API.RangeQueryLimit {
		return nil, status.Error(codes.InvalidArgument, "range exceeds the limit")
	}
	tipHeight := core.bc.TipHeight()
	if startHeight > tipHeight {
		return nil, status.Error(codes.InvalidArgument, "start height should not exceed tip height")
	}
	// clamp the requested range to the current tip
	endHeight := startHeight + count - 1
	if endHeight > tipHeight {
		endHeight = tipHeight
	}
	var res []*iotexapi.BlockInfo
	for height := startHeight; height <= endHeight; height++ {
		blk, err := core.dao.GetBlockByHeight(height)
		if err != nil {
			return nil, status.Error(codes.NotFound, err.Error())
		}
		var receiptsPb []*iotextypes.Receipt
		// the genesis block (height 0) carries no receipts
		if withReceipts && height > 0 {
			receipts, err := core.dao.GetReceipts(height)
			if err != nil {
				return nil, status.Error(codes.NotFound, err.Error())
			}
			for _, receipt := range receipts {
				receiptsPb = append(receiptsPb, receipt.ConvertToReceiptPb())
			}
		}
		var transactionLogs *iotextypes.TransactionLogs
		if withTransactionLogs {
			if transactionLogs, err = core.dao.TransactionLogs(height); err != nil {
				return nil, status.Error(codes.NotFound, err.Error())
			}
		}
		res = append(res, &iotexapi.BlockInfo{
			Block:           blk.ConvertToBlockPb(),
			Receipts:        receiptsPb,
			TransactionLogs: transactionLogs,
		})
	}
	return res, nil
}
// Logs returns logs matching the given filter, looked up either by a single
// block hash or by a block-height range. Each lookup mode returns directly,
// which removes the short-variable-declaration shadowing of err that the
// previous version had in the by-block branch.
func (core *coreService) Logs(in *iotexapi.GetLogsRequest) ([]*iotextypes.Log, error) {
	if in.GetFilter() == nil {
		return nil, status.Error(codes.InvalidArgument, "empty filter")
	}
	switch {
	case in.GetByBlock() != nil:
		req := in.GetByBlock()
		// resolve the block hash to its height; unknown hashes fail here
		blkHeight, err := core.dao.GetBlockHeight(hash.BytesToHash256(req.BlockHash))
		if err != nil {
			return nil, status.Error(codes.InvalidArgument, "invalid block hash")
		}
		return core.getLogsInBlock(logfilter.NewLogFilter(in.GetFilter(), nil, nil), blkHeight)
	case in.GetByRange() != nil:
		req := in.GetByRange()
		startBlock := req.GetFromBlock()
		if startBlock > core.bc.TipHeight() {
			return nil, status.Error(codes.InvalidArgument, "start block > tip height")
		}
		// an end block of 0 or beyond the tip means "up to the tip"
		endBlock := req.GetToBlock()
		if endBlock > core.bc.TipHeight() || endBlock == 0 {
			endBlock = core.bc.TipHeight()
		}
		// default pagination is 1000 entries, hard-capped at 5000
		paginationSize := req.GetPaginationSize()
		if paginationSize == 0 {
			paginationSize = 1000
		}
		if paginationSize > 5000 {
			paginationSize = 5000
		}
		return core.getLogsInRange(logfilter.NewLogFilter(in.GetFilter(), nil, nil), startBlock, endBlock, paginationSize)
	default:
		return nil, status.Error(codes.InvalidArgument, "invalid GetLogsRequest type")
	}
}
// StreamBlocks registers a block responder on the chain listener and blocks
// until the stream terminates, returning any terminal error as Aborted.
func (core *coreService) StreamBlocks(stream iotexapi.APIService_StreamBlocksServer) error {
	errChan := make(chan error)
	if err := core.chainListener.AddResponder(NewBlockListener(stream, errChan)); err != nil {
		return status.Error(codes.Internal, err.Error())
	}
	// block on the responder's termination signal; the previous for/select
	// wrapper around a single channel receive was redundant
	err := <-errChan
	if err != nil {
		err = status.Error(codes.Aborted, err.Error())
	}
	return err
}
// StreamLogs registers a log filter on the chain listener so new blocks are
// matched against it, then blocks until the stream terminates, returning any
// terminal error as Aborted.
func (core *coreService) StreamLogs(in *iotexapi.LogsFilter, stream iotexapi.APIService_StreamLogsServer) error {
	if in == nil {
		return status.Error(codes.InvalidArgument, "empty filter")
	}
	errChan := make(chan error)
	// register the log filter so it will match logs in new blocks
	if err := core.chainListener.AddResponder(logfilter.NewLogFilter(in, stream, errChan)); err != nil {
		return status.Error(codes.Internal, err.Error())
	}
	// block on the responder's termination signal; the previous for/select
	// wrapper around a single channel receive was redundant
	err := <-errChan
	if err != nil {
		err = status.Error(codes.Aborted, err.Error())
	}
	return err
}
// ElectionBuckets returns the native election buckets for the given epoch.
func (core *coreService) ElectionBuckets(epochNum uint64) ([]*iotextypes.ElectionBucket, error) {
	if core.electionCommittee == nil {
		return nil, status.Error(codes.Unavailable, "Native election no supported")
	}
	buckets, err := core.electionCommittee.NativeBucketsByEpoch(epochNum)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	out := make([]*iotextypes.ElectionBucket, len(buckets))
	for i, b := range buckets {
		start, err := ptypes.TimestampProto(b.StartTime())
		if err != nil {
			return nil, status.Error(codes.Internal, err.Error())
		}
		bucket := &iotextypes.ElectionBucket{
			Voter:     b.Voter(),
			Candidate: b.Candidate(),
			Amount:    b.Amount().Bytes(),
			StartTime: start,
			Duration:  ptypes.DurationProto(b.Duration()),
			Decay:     b.Decay(),
		}
		out[i] = bucket
	}
	return out, nil
}
// ReceiptByActionHash returns the receipt recorded for the action with hash h.
func (core *coreService) ReceiptByActionHash(h hash.Hash256) (*action.Receipt, error) {
	if core.indexer == nil || !core.hasActionIndex {
		return nil, status.Error(codes.NotFound, blockindex.ErrActionIndexNA.Error())
	}
	actIndex, err := core.indexer.GetActionIndex(h[:])
	if err != nil {
		return nil, err
	}
	return core.dao.GetReceiptByActionHash(h, actIndex.BlockHeight())
}
// TransactionLogByActionHash returns the transaction log of the action with
// the given hex-encoded hash by locating its block and scanning that block's
// transaction logs for a matching action hash.
func (core *coreService) TransactionLogByActionHash(actHash string) (*iotextypes.TransactionLog, error) {
	if !core.hasActionIndex || core.indexer == nil {
		return nil, status.Error(codes.Unimplemented, blockindex.ErrActionIndexNA.Error())
	}
	// the DAO must have been built with transaction-log support
	if !core.dao.ContainsTransactionLog() {
		return nil, status.Error(codes.Unimplemented, filedao.ErrNotSupported.Error())
	}
	h, err := hex.DecodeString(actHash)
	if err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}
	actIndex, err := core.indexer.GetActionIndex(h)
	if err != nil {
		if errors.Cause(err) == db.ErrNotExist {
			return nil, status.Error(codes.NotFound, err.Error())
		}
		return nil, status.Error(codes.Internal, err.Error())
	}
	sysLog, err := core.dao.TransactionLogs(actIndex.BlockHeight())
	if err != nil {
		if errors.Cause(err) == db.ErrNotExist {
			return nil, status.Error(codes.NotFound, err.Error())
		}
		return nil, status.Error(codes.Internal, err.Error())
	}
	// find this action's log among all logs of the block
	for _, log := range sysLog.Logs {
		if bytes.Equal(h, log.ActionHash) {
			return log, nil
		}
	}
	return nil, status.Errorf(codes.NotFound, "transaction log not found for action %s", actHash)
}
// TransactionLogByBlockHeight returns the block identifier and all
// transaction logs for the block at the given height. A block with no
// transaction logs yields a nil TransactionLogs with no error.
func (core *coreService) TransactionLogByBlockHeight(blockHeight uint64) (*iotextypes.BlockIdentifier, *iotextypes.TransactionLogs, error) {
	if !core.dao.ContainsTransactionLog() {
		return nil, nil, status.Error(codes.Unimplemented, filedao.ErrNotSupported.Error())
	}
	tip, err := core.dao.Height()
	if err != nil {
		return nil, nil, status.Error(codes.Internal, err.Error())
	}
	if blockHeight < 1 || blockHeight > tip {
		return nil, nil, status.Errorf(codes.InvalidArgument, "invalid block height = %d", blockHeight)
	}
	h, err := core.dao.GetBlockHash(blockHeight)
	if err != nil {
		if errors.Cause(err) == db.ErrNotExist {
			return nil, nil, status.Error(codes.NotFound, err.Error())
		}
		return nil, nil, status.Error(codes.Internal, err.Error())
	}
	blockIdentifier := &iotextypes.BlockIdentifier{
		Hash:   hex.EncodeToString(h[:]),
		Height: blockHeight,
	}
	sysLog, err := core.dao.TransactionLogs(blockHeight)
	if err != nil {
		if errors.Cause(err) == db.ErrNotExist {
			// should return empty, no transaction happened in block
			return blockIdentifier, nil, nil
		}
		return nil, nil, status.Error(codes.Internal, err.Error())
	}
	return blockIdentifier, sysLog, nil
}
// Start registers the read cache and chain listener as blockchain
// subscribers, then starts the chain listener.
func (core *coreService) Start() error {
	if err := core.bc.AddSubscriber(core.readCache); err != nil {
		return errors.Wrap(err, "failed to add readCache")
	}
	if err := core.bc.AddSubscriber(core.chainListener); err != nil {
		return errors.Wrap(err, "failed to add chainListener")
	}
	err := core.chainListener.Start()
	if err != nil {
		err = errors.Wrap(err, "failed to start blockchain listener")
	}
	return err
}
// Stop shuts down the chain listener; subscribers are not explicitly removed.
func (core *coreService) Stop() error {
	return core.chainListener.Stop()
}
// readState performs a protocol state read, served from the read cache when
// possible. An empty height string means "read at the tip"; a height falling
// in an earlier epoch is served through a history state reader anchored at
// that epoch's start height.
func (core *coreService) readState(ctx context.Context, p protocol.Protocol, height string, methodName []byte, arguments ...[]byte) ([]byte, uint64, error) {
	key := ReadKey{
		Name:   p.Name(),
		Height: height,
		Method: methodName,
		Args:   arguments,
	}
	if d, ok := core.readCache.Get(key.Hash()); ok {
		var h uint64
		if height != "" {
			// best-effort parse; on a cache hit the caller-supplied height is echoed back
			h, _ = strconv.ParseUint(height, 0, 64)
		}
		return d, h, nil
	}
	// TODO: need to complete the context
	tipHeight := core.bc.TipHeight()
	ctx = protocol.WithBlockCtx(ctx, protocol.BlockCtx{
		BlockHeight: tipHeight,
	})
	ctx = genesis.WithGenesisContext(
		protocol.WithRegistry(ctx, core.registry),
		core.cfg.Genesis,
	)
	ctx = protocol.WithFeatureCtx(protocol.WithFeatureWithHeightCtx(ctx))
	rp := rolldpos.FindProtocol(core.registry)
	if rp == nil {
		return nil, uint64(0), errors.New("rolldpos is not registered")
	}
	tipEpochNum := rp.GetEpochNum(tipHeight)
	if height != "" {
		inputHeight, err := strconv.ParseUint(height, 0, 64)
		if err != nil {
			return nil, uint64(0), err
		}
		inputEpochNum := rp.GetEpochNum(inputHeight)
		if inputEpochNum < tipEpochNum {
			// old data, wrap to history state reader
			d, h, err := p.ReadState(ctx, factory.NewHistoryStateReader(core.sf, rp.GetEpochHeight(inputEpochNum)), methodName, arguments...)
			if err == nil {
				core.readCache.Put(key.Hash(), d)
			}
			return d, h, err
		}
	}
	// TODO: need to distinguish user error and system error
	d, h, err := p.ReadState(ctx, core.sf, methodName, arguments...)
	if err == nil {
		core.readCache.Put(key.Hash(), d)
	}
	return d, h, err
}
// getActionsFromIndex fetches count action hashes starting at start from the
// action index and resolves each into an ActionInfo.
// NOTE(review): the totalActions parameter is currently unused by this
// function; the signature is kept for its existing callers.
func (core *coreService) getActionsFromIndex(totalActions, start, count uint64) ([]*iotexapi.ActionInfo, error) {
	hashes, err := core.indexer.GetActionHashFromIndex(start, count)
	if err != nil {
		return nil, status.Error(codes.Unavailable, err.Error())
	}
	var actionInfo []*iotexapi.ActionInfo
	for i := range hashes {
		act, err := core.getAction(hash.BytesToHash256(hashes[i]), false)
		if err != nil {
			return nil, status.Error(codes.Unavailable, err.Error())
		}
		actionInfo = append(actionInfo, act)
	}
	return actionInfo, nil
}
// Actions returns up to count actions starting at global action index start.
// With an action index available it queries the index directly; otherwise it
// scans blocks in reverse, which favors queries for the most recent actions.
// Two unreachable branches from the previous version are removed: the
// totalActions==0/count==0 early return (both already excluded by earlier
// guards) and the post-clamp re-check of start+count against totalActions.
func (core *coreService) Actions(start uint64, count uint64) ([]*iotexapi.ActionInfo, error) {
	if count == 0 {
		return nil, status.Error(codes.InvalidArgument, "count must be greater than zero")
	}
	if count > core.cfg.API.RangeQueryLimit {
		return nil, status.Error(codes.InvalidArgument, "range exceeds the limit")
	}
	totalActions, err := core.indexer.GetTotalActions()
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	if start >= totalActions {
		return nil, status.Error(codes.InvalidArgument, "start exceeds the total actions in the block")
	}
	// clamp the requested window to the number of indexed actions
	if start+count > totalActions {
		count = totalActions - start
	}
	if core.hasActionIndex {
		return core.getActionsFromIndex(totalActions, start, count)
	}
	// Finding actions in reverse order saves time for querying most recent actions
	// after clamping, start+count <= totalActions, so this cannot underflow
	reverseStart := totalActions - (start + count)
	var res []*iotexapi.ActionInfo
	var hit bool
	for height := core.bc.TipHeight(); height >= 1 && count > 0; height-- {
		blk, err := core.dao.GetBlockByHeight(height)
		if err != nil {
			return nil, status.Error(codes.NotFound, err.Error())
		}
		if !hit && reverseStart >= uint64(len(blk.Actions)) {
			// the whole block lies after the requested window; skip it
			reverseStart -= uint64(len(blk.Actions))
			continue
		}
		// now reverseStart < len(blk.Actions), we are going to fetch actions from this block
		hit = true
		act := core.reverseActionsInBlock(blk, reverseStart, count)
		res = append(act, res...)
		count -= uint64(len(act))
		reverseStart = 0
	}
	return res, nil
}
// Action returns the action info for the given hex-encoded action hash,
// optionally also consulting the pending pool.
func (core *coreService) Action(actionHash string, checkPending bool) (*iotexapi.ActionInfo, error) {
	actHash, err := hash.HexStringToHash256(actionHash)
	if err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}
	info, err := core.getAction(actHash, checkPending)
	if err != nil {
		return nil, status.Error(codes.Unavailable, err.Error())
	}
	return info, nil
}
// ActionsByAddress returns up to count actions associated with addr,
// starting at offset start within the per-address index.
func (core *coreService) ActionsByAddress(addr address.Address, start uint64, count uint64) ([]*iotexapi.ActionInfo, error) {
	switch {
	case count == 0:
		return nil, status.Error(codes.InvalidArgument, "count must be greater than zero")
	case count > core.cfg.API.RangeQueryLimit:
		return nil, status.Error(codes.InvalidArgument, "range exceeds the limit")
	}
	actions, err := core.indexer.GetActionsByAddress(hash.BytesToHash160(addr.Bytes()), start, count)
	if err != nil {
		cause := errors.Cause(err)
		if cause == db.ErrBucketNotExist || cause == db.ErrNotExist {
			// no actions associated with address, return nil
			return nil, nil
		}
		return nil, status.Error(codes.NotFound, err.Error())
	}
	var res []*iotexapi.ActionInfo
	for _, actHash := range actions {
		info, err := core.getAction(hash.BytesToHash256(actHash), false)
		if err != nil {
			// skip hashes that cannot be resolved into full action info
			continue
		}
		res = append(res, info)
	}
	return res, nil
}
// getBlockHashByActionHash resolves the hash of the block that contains the
// action with hash h, via the action index.
func (core *coreService) getBlockHashByActionHash(h hash.Hash256) (hash.Hash256, error) {
	actIndex, err := core.indexer.GetActionIndex(h[:])
	if err != nil {
		return hash.ZeroHash256, err
	}
	return core.dao.GetBlockHash(actIndex.BlockHeight())
}
// getActionByActionHash returns the sealed action with hash h along with the
// containing block's hash, the block height, and the action's index within
// the block.
func (core *coreService) getActionByActionHash(h hash.Hash256) (action.SealedEnvelope, hash.Hash256, uint64, uint32, error) {
	actIndex, err := core.indexer.GetActionIndex(h[:])
	if err != nil {
		return action.SealedEnvelope{}, hash.ZeroHash256, 0, 0, err
	}
	blk, err := core.dao.GetBlockByHeight(actIndex.BlockHeight())
	if err != nil {
		return action.SealedEnvelope{}, hash.ZeroHash256, 0, 0, err
	}
	selp, index, err := core.dao.GetActionByActionHash(h, actIndex.BlockHeight())
	return selp, blk.HashBlock(), actIndex.BlockHeight(), index, err
}
// UnconfirmedActionsByAddress returns up to count unconfirmed (actpool)
// actions associated with the given address, starting at offset start.
func (core *coreService) UnconfirmedActionsByAddress(address string, start uint64, count uint64) ([]*iotexapi.ActionInfo, error) {
	if count == 0 {
		return nil, status.Error(codes.InvalidArgument, "count must be greater than zero")
	}
	if count > core.cfg.API.RangeQueryLimit {
		return nil, status.Error(codes.InvalidArgument, "range exceeds the limit")
	}
	selps := core.ap.GetUnconfirmedActs(address)
	total := uint64(len(selps))
	if total == 0 {
		return []*iotexapi.ActionInfo{}, nil
	}
	if start >= total {
		return nil, status.Error(codes.InvalidArgument, "start exceeds the limit")
	}
	end := start + count
	if end > total {
		end = total
	}
	var res []*iotexapi.ActionInfo
	for _, selp := range selps[start:end] {
		// actions that fail to convert are silently skipped
		if act, err := core.pendingAction(selp); err == nil {
			res = append(res, act)
		}
	}
	return res, nil
}
// ActionsByBlock returns up to count actions of the block with the given
// hex-encoded hash, starting at offset start within the block.
func (core *coreService) ActionsByBlock(blkHash string, start uint64, count uint64) ([]*iotexapi.ActionInfo, error) {
	switch {
	case count == 0:
		return nil, status.Error(codes.InvalidArgument, "count must be greater than zero")
	case count > core.cfg.API.RangeQueryLimit && count != math.MaxUint64:
		// count == math.MaxUint64 bypasses the range limit — presumably a
		// "fetch all actions" sentinel; confirm against callers
		return nil, status.Error(codes.InvalidArgument, "range exceeds the limit")
	}
	h, err := hash.HexStringToHash256(blkHash)
	if err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}
	blk, err := core.dao.GetBlock(h)
	if err != nil {
		return nil, status.Error(codes.NotFound, err.Error())
	}
	if start >= uint64(len(blk.Actions)) {
		return nil, status.Error(codes.InvalidArgument, "start exceeds the limit")
	}
	return core.actionsInBlock(blk, start, count), nil
}
// BlockMetas returns block metadata for up to count blocks starting at
// height start, truncated at the current tip.
func (core *coreService) BlockMetas(start uint64, count uint64) ([]*iotextypes.BlockMeta, error) {
	if count == 0 {
		return nil, status.Error(codes.InvalidArgument, "count must be greater than zero")
	}
	if count > core.cfg.API.RangeQueryLimit {
		return nil, status.Error(codes.InvalidArgument, "range exceeds the limit")
	}
	var (
		tipHeight = core.bc.TipHeight()
		res       []*iotextypes.BlockMeta
	)
	// NOTE(review): a start beyond the tip only logs at debug level and
	// returns (nil, nil) rather than an error — confirm callers rely on this
	if start > tipHeight {
		log.L().Debug("err in BlockMetas()", zap.Error(status.Error(codes.InvalidArgument, "start height should not exceed tip height")))
		return nil, nil
	}
	for height := start; height <= tipHeight && count > 0; height++ {
		blockMeta, err := core.getBlockMetaByHeight(height)
		if err != nil {
			return nil, err
		}
		res = append(res, blockMeta)
		count--
	}
	return res, nil
}
// BlockMetaByHash returns the block metadata of the block whose hash is the
// given hex-encoded string.
func (core *coreService) BlockMetaByHash(blkHash string) (*iotextypes.BlockMeta, error) {
	h, err := hash.HexStringToHash256(blkHash)
	if err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}
	height, err := core.dao.GetBlockHeight(h)
	switch {
	case err == nil:
		return core.getBlockMetaByHeight(height)
	case errors.Cause(err) == db.ErrNotExist:
		return nil, status.Error(codes.NotFound, err.Error())
	default:
		return nil, status.Error(codes.Internal, err.Error())
	}
}
// getBlockMetaByHeight loads the block at the given height, attaches its
// receipts (skipping the receipt-less genesis block), and converts it to a
// BlockMeta.
func (core *coreService) getBlockMetaByHeight(height uint64) (*iotextypes.BlockMeta, error) {
	blk, err := core.dao.GetBlockByHeight(height)
	if err != nil {
		return nil, status.Error(codes.NotFound, err.Error())
	}
	// the genesis block (height 0) has no receipts to attach
	if blk.Height() > 0 {
		receipts, err := core.dao.GetReceipts(height)
		if err != nil {
			return nil, status.Error(codes.NotFound, err.Error())
		}
		blk.Receipts = receipts
	}
	return generateBlockMeta(blk), nil
}
// generateBlockMeta converts a block into its BlockMeta representation,
// including header digests, producer, logs bloom, action count, transfer
// amount, and gas limit/usage.
func generateBlockMeta(blk *block.Block) *iotextypes.BlockMeta {
	header := blk.Header
	height := header.Height()
	ts, _ := ptypes.TimestampProto(header.Timestamp())
	var (
		producerAddress string
		h               hash.Hash256
	)
	// the genesis block has a fixed hash and no producer
	if blk.Height() > 0 {
		producerAddress = header.ProducerAddress()
		h = header.HashBlock()
	} else {
		h = block.GenesisHash()
	}
	txRoot := header.TxRoot()
	receiptRoot := header.ReceiptRoot()
	deltaStateDigest := header.DeltaStateDigest()
	prevHash := header.PrevHash()
	blockMeta := iotextypes.BlockMeta{
		Hash:              hex.EncodeToString(h[:]),
		Height:            height,
		Timestamp:         ts,
		ProducerAddress:   producerAddress,
		TxRoot:            hex.EncodeToString(txRoot[:]),
		ReceiptRoot:       hex.EncodeToString(receiptRoot[:]),
		DeltaStateDigest:  hex.EncodeToString(deltaStateDigest[:]),
		PreviousBlockHash: hex.EncodeToString(prevHash[:]),
	}
	if logsBloom := header.LogsBloomfilter(); logsBloom != nil {
		blockMeta.LogsBloom = hex.EncodeToString(logsBloom.Bytes())
	}
	blockMeta.NumActions = int64(len(blk.Actions))
	blockMeta.TransferAmount = blk.CalculateTransferAmount().String()
	blockMeta.GasLimit, blockMeta.GasUsed = gasLimitAndUsed(blk)
	return &blockMeta
}
// GasLimitAndUsed returns the gas limit and used in a block
func gasLimitAndUsed(b *block.Block) (uint64, uint64) {
var gasLimit, gasUsed uint64
for _, tx := range b.Actions {
gasLimit += tx.GasLimit()
}
for _, r := range b.Receipts {
gasUsed += r.GasConsumed
}
return gasLimit, gasUsed
}
// getGravityChainStartHeight maps an epoch start height to the corresponding
// gravity-chain start height via the poll protocol's
// "GetGravityChainStartHeight" read-state method. If the poll protocol is not
// registered, the epoch height itself is returned unchanged.
func (core *coreService) getGravityChainStartHeight(epochHeight uint64) (uint64, error) {
	gravityChainStartHeight := epochHeight
	if pp := poll.FindProtocol(core.registry); pp != nil {
		methodName := []byte("GetGravityChainStartHeight")
		arguments := [][]byte{[]byte(strconv.FormatUint(epochHeight, 10))}
		data, _, err := core.readState(context.Background(), pp, "", methodName, arguments...)
		if err != nil {
			return 0, err
		}
		// An empty result means no mapping exists; report 0.
		if len(data) == 0 {
			return 0, nil
		}
		if gravityChainStartHeight, err = strconv.ParseUint(string(data), 10, 64); err != nil {
			return 0, err
		}
	}
	return gravityChainStartHeight, nil
}
// committedAction assembles an ActionInfo for an action already committed in
// the block with hash blkHash at blkHeight, including its receipt-based gas fee.
func (core *coreService) committedAction(selp action.SealedEnvelope, blkHash hash.Hash256, blkHeight uint64) (*iotexapi.ActionInfo, error) {
	actHash, err := selp.Hash()
	if err != nil {
		return nil, err
	}
	header, err := core.dao.Header(blkHash)
	if err != nil {
		return nil, err
	}
	sender := selp.SrcPubkey().Address()
	receipt, err := core.dao.GetReceiptByActionHash(actHash, blkHeight)
	if err != nil {
		return nil, err
	}
	// Gas fee = gas price * gas consumed (taken from the receipt).
	gas := new(big.Int)
	gas = gas.Mul(selp.GasPrice(), big.NewInt(int64(receipt.GasConsumed)))
	return &iotexapi.ActionInfo{
		Action:    selp.Proto(),
		ActHash:   hex.EncodeToString(actHash[:]),
		BlkHash:   hex.EncodeToString(blkHash[:]),
		BlkHeight: header.Height(),
		Sender:    sender.String(),
		GasFee:    gas.String(),
		Timestamp: header.BlockHeaderCoreProto().Timestamp,
	}, nil
}

// pendingAction assembles an ActionInfo for an action still in the actpool:
// the block hash is the zero hash, height is 0 and timestamp is nil.
func (core *coreService) pendingAction(selp action.SealedEnvelope) (*iotexapi.ActionInfo, error) {
	actHash, err := selp.Hash()
	if err != nil {
		return nil, err
	}
	sender := selp.SrcPubkey().Address()
	return &iotexapi.ActionInfo{
		Action:    selp.Proto(),
		ActHash:   hex.EncodeToString(actHash[:]),
		BlkHash:   hex.EncodeToString(hash.ZeroHash256[:]),
		BlkHeight: 0,
		Sender:    sender.String(),
		Timestamp: nil,
		Index:     0,
	}, nil
}
// getAction looks up an action by hash, first among committed actions and,
// when checkPending is true, falling back to the actpool. If the committed
// lookup fails and checkPending is false, the lookup error is returned as-is.
func (core *coreService) getAction(actHash hash.Hash256, checkPending bool) (*iotexapi.ActionInfo, error) {
	selp, blkHash, blkHeight, actIndex, err := core.getActionByActionHash(actHash)
	if err == nil {
		act, err := core.committedAction(selp, blkHash, blkHeight)
		if err != nil {
			return nil, err
		}
		act.Index = actIndex
		return act, nil
	}
	// Try to fetch pending action from actpool
	if checkPending {
		selp, err = core.ap.GetActionByHash(actHash)
	}
	if err != nil {
		return nil, err
	}
	return core.pendingAction(selp)
}
// actionsInBlock returns ActionInfos for actions [start, start+count) of blk,
// in ascending index order. count == math.MaxUint64 means "all actions".
// Actions whose hash cannot be computed are skipped with a debug log.
func (core *coreService) actionsInBlock(blk *block.Block, start, count uint64) []*iotexapi.ActionInfo {
	var res []*iotexapi.ActionInfo
	if len(blk.Actions) == 0 || start >= uint64(len(blk.Actions)) {
		return res
	}
	h := blk.HashBlock()
	blkHash := hex.EncodeToString(h[:])
	blkHeight := blk.Height()
	ts := blk.Header.BlockHeaderCoreProto().Timestamp
	lastAction := start + count
	if count == math.MaxUint64 {
		// count = -1 means to get all actions
		lastAction = uint64(len(blk.Actions))
	} else {
		// Clamp the end of the range to the number of actions in the block.
		if lastAction >= uint64(len(blk.Actions)) {
			lastAction = uint64(len(blk.Actions))
		}
	}
	for i := start; i < lastAction; i++ {
		selp := blk.Actions[i]
		actHash, err := selp.Hash()
		if err != nil {
			log.L().Debug("Skipping action due to hash error", zap.Error(err))
			continue
		}
		sender := selp.SrcPubkey().Address()
		res = append(res, &iotexapi.ActionInfo{
			Action:    selp.Proto(),
			ActHash:   hex.EncodeToString(actHash[:]),
			BlkHash:   blkHash,
			Timestamp: ts,
			BlkHeight: blkHeight,
			Sender:    sender.String(),
			Index:     uint32(i),
		})
	}
	return res
}
// reverseActionsInBlock returns ActionInfos for the count actions starting at
// offset reverseStart counted from the END of the block, in ascending index
// order. The original prepended each element to the slice head (O(n^2) copies);
// this version appends and reverses once at the end, producing the same order
// in O(n). Actions whose hash cannot be computed are skipped with a debug log.
// NOTE(review): reverseStart+count can wrap for very large counts; this matches
// the original behavior — confirm callers bound count.
func (core *coreService) reverseActionsInBlock(blk *block.Block, reverseStart, count uint64) []*iotexapi.ActionInfo {
	h := blk.HashBlock()
	blkHash := hex.EncodeToString(h[:])
	blkHeight := blk.Height()
	ts := blk.Header.BlockHeaderCoreProto().Timestamp
	var res []*iotexapi.ActionInfo
	for i := reverseStart; i < uint64(len(blk.Actions)) && i < reverseStart+count; i++ {
		ri := uint64(len(blk.Actions)) - 1 - i
		selp := blk.Actions[ri]
		actHash, err := selp.Hash()
		if err != nil {
			log.L().Debug("Skipping action due to hash error", zap.Error(err))
			continue
		}
		sender := selp.SrcPubkey().Address()
		res = append(res, &iotexapi.ActionInfo{
			Action:    selp.Proto(),
			ActHash:   hex.EncodeToString(actHash[:]),
			BlkHash:   blkHash,
			Timestamp: ts,
			BlkHeight: blkHeight,
			Sender:    sender.String(),
			Index:     uint32(ri),
		})
	}
	// Collected in descending index order; reverse in place to ascending.
	for l, r := 0, len(res)-1; l < r; l, r = l+1, r-1 {
		res[l], res[r] = res[r], res[l]
	}
	return res
}
// getLogsInBlock returns the logs in the block at blockNumber that match
// filter. The per-block bloom filter is consulted first so blocks that cannot
// contain a match are skipped without touching receipts.
func (core *coreService) getLogsInBlock(filter *logfilter.LogFilter, blockNumber uint64) ([]*iotextypes.Log, error) {
	logBloomFilter, err := core.bfIndexer.BlockFilterByHeight(blockNumber)
	if err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}
	// Bloom filter miss: definitely no matching logs in this block.
	if !filter.ExistInBloomFilterv2(logBloomFilter) {
		return nil, nil
	}
	receipts, err := core.dao.GetReceipts(blockNumber)
	if err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}
	h, err := core.dao.GetBlockHash(blockNumber)
	if err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}
	return filter.MatchLogs(receipts, h), nil
}
// TODO: improve using goroutine
// getLogsInRange returns up to paginationSize logs matching filter within the
// height range [start, end]. start == 0 is normalized to 1.
func (core *coreService) getLogsInRange(filter *logfilter.LogFilter, start, end, paginationSize uint64) ([]*iotextypes.Log, error) {
	if start > end {
		return nil, errors.New("invalid start and end height")
	}
	if start == 0 {
		start = 1
	}
	logs := []*iotextypes.Log{}
	// getLogs via range Blooom filter [start, end]
	blockNumbers, err := core.bfIndexer.FilterBlocksInRange(filter, start, end)
	if err != nil {
		return nil, err
	}
	for _, blockNumber := range blockNumbers {
		logsInBlock, err := core.getLogsInBlock(filter, blockNumber)
		if err != nil {
			return nil, err
		}
		// Loop variable renamed from "log", which shadowed the imported log package.
		for _, l := range logsInBlock {
			logs = append(logs, l)
			// Stop as soon as the page is full.
			if len(logs) >= int(paginationSize) {
				return logs, nil
			}
		}
	}
	return logs, nil
}
// estimateActionGasConsumptionForExecution estimates the gas needed for an
// execution by simulating it: first at the block gas limit to obtain a
// baseline consumption, then — if that consumption is not itself a sufficient
// limit — binary-searching [consumed, blockGasLimit] for the smallest limit
// under which the simulation succeeds.
func (core *coreService) estimateActionGasConsumptionForExecution(ctx context.Context, exec *iotextypes.Execution, sender string) (uint64, error) {
	sc := &action.Execution{}
	if err := sc.LoadProto(exec); err != nil {
		return 0, status.Error(codes.InvalidArgument, err.Error())
	}
	// Parse the sender address once and reuse it; the original parsed the same
	// string twice (the second parse could never fail if the first succeeded).
	callerAddr, err := address.FromString(sender)
	if err != nil {
		return 0, status.Error(codes.FailedPrecondition, err.Error())
	}
	state, err := accountutil.AccountState(core.sf, callerAddr)
	if err != nil {
		return 0, status.Error(codes.InvalidArgument, err.Error())
	}
	nonce := state.Nonce + 1
	enough, receipt, err := core.isGasLimitEnough(ctx, callerAddr, sc, nonce, core.cfg.Genesis.BlockGasLimit)
	if err != nil {
		return 0, status.Error(codes.Internal, err.Error())
	}
	if !enough {
		if receipt.ExecutionRevertMsg() != "" {
			return 0, status.Errorf(codes.Internal, "execution simulation is reverted due to the reason: %s", receipt.ExecutionRevertMsg())
		}
		return 0, status.Errorf(codes.Internal, "execution simulation failed: status = %d", receipt.Status)
	}
	estimatedGas := receipt.GasConsumed
	// The reported consumption may still be too small as a limit (intrinsic
	// gas, refunds); verify it before accepting.
	enough, _, err = core.isGasLimitEnough(ctx, callerAddr, sc, nonce, estimatedGas)
	if err != nil && err != action.ErrInsufficientFunds {
		return 0, status.Error(codes.Internal, err.Error())
	}
	if !enough {
		low, high := estimatedGas, core.cfg.Genesis.BlockGasLimit
		estimatedGas = high
		for low <= high {
			mid := (low + high) / 2
			enough, _, err = core.isGasLimitEnough(ctx, callerAddr, sc, nonce, mid)
			if err != nil && err != action.ErrInsufficientFunds {
				return 0, status.Error(codes.Internal, err.Error())
			}
			if enough {
				estimatedGas = mid
				high = mid - 1
			} else {
				low = mid + 1
			}
		}
	}
	return estimatedGas, nil
}
// isGasLimitEnough simulates sc with the given gasLimit and reports whether
// the simulated execution succeeded, along with the simulation receipt.
func (core *coreService) isGasLimitEnough(
	ctx context.Context,
	caller address.Address,
	sc *action.Execution,
	nonce uint64,
	gasLimit uint64,
) (bool, *action.Receipt, error) {
	ctx, span := tracer.NewSpan(ctx, "Server.isGasLimitEnough")
	defer span.End()
	// Rebuild the execution with the candidate gas limit and a zero gas price;
	// the constructor error is ignored (inputs come from an already-valid sc).
	sc, _ = action.NewExecution(
		sc.Contract(),
		nonce,
		sc.Amount(),
		gasLimit,
		big.NewInt(0),
		sc.Data(),
	)
	ctx, err := core.bc.Context(ctx)
	if err != nil {
		return false, nil, err
	}
	_, receipt, err := core.sf.SimulateExecution(ctx, caller, sc, core.dao.GetBlockHash)
	if err != nil {
		return false, nil, err
	}
	return receipt.Status == uint64(iotextypes.ReceiptStatus_Success), receipt, nil
}
// getProductivityByEpoch returns the expected block count and per-producer
// production counts for the given epoch. Active block producers that produced
// no block are included with a count of 0.
func (core *coreService) getProductivityByEpoch(
	rp *rolldpos.Protocol,
	epochNum uint64,
	tipHeight uint64,
	abps state.CandidateList,
) (uint64, map[string]uint64, error) {
	num, produce, err := rp.ProductivityByEpoch(epochNum, tipHeight, func(start uint64, end uint64) (map[string]uint64, error) {
		return blockchain.Productivity(core.bc, start, end)
	})
	if err != nil {
		return 0, nil, status.Error(codes.NotFound, err.Error())
	}
	// check if there is any active block producer who didn't produce any block
	for _, abp := range abps {
		if _, ok := produce[abp.Address]; !ok {
			produce[abp.Address] = 0
		}
	}
	return num, produce, nil
}
// getProtocolAccount returns the account metadata (balance) for one of the
// protocol-owned addresses: the rewarding pool or the staking bucket pool.
// Any other address is rejected with an error.
func (core *coreService) getProtocolAccount(ctx context.Context, addr string) (*iotextypes.AccountMeta, *iotextypes.BlockIdentifier, error) {
	var (
		balance string
		out     *iotexapi.ReadStateResponse
		err     error
	)
	switch addr {
	case address.RewardingPoolAddr:
		// Rewarding pool balance comes from the rewarding protocol's TotalBalance.
		if out, err = core.ReadState("rewarding", "", []byte("TotalBalance"), nil); err != nil {
			return nil, nil, err
		}
		val, ok := big.NewInt(0).SetString(string(out.GetData()), 10)
		if !ok {
			return nil, nil, errors.New("balance convert error")
		}
		balance = val.String()
	case address.StakingBucketPoolAddr:
		// Staking bucket pool balance comes from the staking protocol's
		// TOTAL_STAKING_AMOUNT read-state method.
		methodName, err := proto.Marshal(&iotexapi.ReadStakingDataMethod{
			Method: iotexapi.ReadStakingDataMethod_TOTAL_STAKING_AMOUNT,
		})
		if err != nil {
			return nil, nil, err
		}
		arg, err := proto.Marshal(&iotexapi.ReadStakingDataRequest{
			Request: &iotexapi.ReadStakingDataRequest_TotalStakingAmount_{
				TotalStakingAmount: &iotexapi.ReadStakingDataRequest_TotalStakingAmount{},
			},
		})
		if err != nil {
			return nil, nil, err
		}
		if out, err = core.ReadState("staking", "", methodName, [][]byte{arg}); err != nil {
			return nil, nil, err
		}
		acc := iotextypes.AccountMeta{}
		if err := proto.Unmarshal(out.GetData(), &acc); err != nil {
			return nil, nil, errors.Wrap(err, "failed to unmarshal account meta")
		}
		balance = acc.GetBalance()
	default:
		return nil, nil, errors.Errorf("invalid address %s", addr)
	}
	return &iotextypes.AccountMeta{
		Address: addr,
		Balance: balance,
	}, out.GetBlockIdentifier(), nil
}
// ActPoolActions returns the all Transaction Identifiers in the mempool.
// When actHashes is empty, every pending action is returned; otherwise only
// the actions with the given hex-encoded hashes are returned, and a missing
// hash fails the whole request with codes.NotFound.
func (core *coreService) ActPoolActions(actHashes []string) ([]*iotextypes.Action, error) {
	var ret []*iotextypes.Action
	if len(actHashes) == 0 {
		for _, sealeds := range core.ap.PendingActionMap() {
			for _, sealed := range sealeds {
				ret = append(ret, sealed.Proto())
			}
		}
		return ret, nil
	}
	for _, hashStr := range actHashes {
		hs, err := hash.HexStringToHash256(hashStr)
		if err != nil {
			return nil, status.Error(codes.InvalidArgument, errors.Wrap(err, "failed to hex string to hash256").Error())
		}
		sealed, err := core.ap.GetActionByHash(hs)
		if err != nil {
			return nil, status.Error(codes.NotFound, err.Error())
		}
		ret = append(ret, sealed.Proto())
	}
	return ret, nil
}
// EVMNetworkID returns the network id of evm
func (core *coreService) EVMNetworkID() uint32 {
	return config.EVMNetworkID()
}

// ChainID returns the chain id of evm
func (core *coreService) ChainID() uint32 {
	return core.bc.ChainID()
}

// ActionByActionHash returns action by action hash; it requires the action
// index to be enabled, otherwise codes.NotFound is returned.
func (core *coreService) ActionByActionHash(h hash.Hash256) (action.SealedEnvelope, error) {
	if !core.hasActionIndex || core.indexer == nil {
		return action.SealedEnvelope{}, status.Error(codes.NotFound, blockindex.ErrActionIndexNA.Error())
	}
	selp, _, _, _, err := core.getActionByActionHash(h)
	return selp, err
}
// ReadContractStorage reads the storage value at key for the contract at addr.
func (core *coreService) ReadContractStorage(ctx context.Context, addr address.Address, key []byte) ([]byte, error) {
	ctx, err := core.bc.Context(ctx)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	return core.sf.ReadContractStorage(ctx, addr, key)
}
| 1 | 24,373 | should use `codes.InvalidArgument`, same as above line 1033 and 1036 | iotexproject-iotex-core | go |
@@ -51,7 +51,7 @@ func getAgentVersions() []DockerVersion {
return append(getWindowsReplaceableVersions(), MinDockerAPIWindows)
}
-// getDefaultVersion returns agent's default version of the Docker API
+// GetDefaultVersion returns agent's default version of the Docker API
func getDefaultVersion() DockerVersion {
return MinDockerAPIWindows
} | 1 | // +build windows
// Copyright 2014-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package dockerclient
import (
"github.com/aws/amazon-ecs-agent/agent/engine/dockeriface"
)
// MinDockerAPIWindows is the minimum Docker API version supported on Windows.
const MinDockerAPIWindows = Version_1_24

// GetClient will replace some versions of Docker on Windows. We need this because
// agent assumes that it can always call older versions of the docker API.
func (f *factory) GetClient(version DockerVersion) (dockeriface.Client, error) {
	// Older API versions are silently upgraded to the Windows minimum.
	for _, v := range getWindowsReplaceableVersions() {
		if v == version {
			version = MinDockerAPIWindows
			break
		}
	}
	return f.getClient(version)
}

// getWindowsReplaceableVersions returns the set of versions that agent will report
// as Docker 1.24
func getWindowsReplaceableVersions() []DockerVersion {
	return []DockerVersion{
		Version_1_17,
		Version_1_18,
		Version_1_19,
		Version_1_20,
		Version_1_21,
		Version_1_22,
		Version_1_23,
	}
}

// getAgentVersions for Windows should return all of the replaceable versions plus additional versions
func getAgentVersions() []DockerVersion {
	return append(getWindowsReplaceableVersions(), MinDockerAPIWindows)
}

// getDefaultVersion returns agent's default version of the Docker API
func getDefaultVersion() DockerVersion {
	return MinDockerAPIWindows
}
| 1 | 16,003 | Please revert this file. | aws-amazon-ecs-agent | go |
@@ -18,10 +18,13 @@
//
// URLs
//
-// For pubsub.OpenTopic/Subscription URLs, azurepubsub registers for the scheme
-// "azuresb". The Service Bus Connection String defaults to the environment
-// variable "SERVICEBUS_CONNECTION_STRING". For details on the format of the
-// URL, see URLOpener.
+// For pubsub.OpenTopic and pubsub.OpenSubscription, azurepubsub registers
+// for the scheme "azuresb".
+// The default URL opener will use a Service Bus Connection String based on
+// the environment variable "SERVICEBUS_CONNECTION_STRING".
+// To customize the URL opener, or for more details on the URL format,
+// see URLOpener.
+// See https://godoc.org/gocloud.dev#hdr-URLs for background information.
//
// As
// | 1 | // Copyright 2018 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package azurepubsub provides an implementation of pubsub using Azure Service
// Bus Topic and Subscription.
// See https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-messaging-overview for an overview.
//
// URLs
//
// For pubsub.OpenTopic/Subscription URLs, azurepubsub registers for the scheme
// "azuresb". The Service Bus Connection String defaults to the environment
// variable "SERVICEBUS_CONNECTION_STRING". For details on the format of the
// URL, see URLOpener.
//
// As
//
// azurepubsub exposes the following types for As:
// - Topic: *servicebus.Topic
// - Subscription: *servicebus.Subscription
// - Message: *servicebus.Message
// - Error: common.Retryable
package azurepubsub // import "gocloud.dev/pubsub/azurepubsub"
import (
"context"
"errors"
"fmt"
"net/url"
"os"
"path"
"runtime"
"sync"
"time"
"gocloud.dev/gcerrors"
"gocloud.dev/internal/useragent"
"gocloud.dev/pubsub"
"gocloud.dev/pubsub/driver"
"pack.ag/amqp"
common "github.com/Azure/azure-amqp-common-go"
"github.com/Azure/azure-amqp-common-go/cbs"
"github.com/Azure/azure-amqp-common-go/rpc"
"github.com/Azure/azure-amqp-common-go/uuid"
servicebus "github.com/Azure/azure-service-bus-go"
)
const (
completedStatus = "completed"
listenerTimeout = 1 * time.Second
rpcTries = 5
rpcRetryDelay = 1 * time.Second
)
// init registers the default opener for both topics and subscriptions under
// the "azuresb" scheme.
func init() {
	o := new(defaultConnectionStringOpener)
	pubsub.DefaultURLMux().RegisterTopic(Scheme, o)
	pubsub.DefaultURLMux().RegisterSubscription(Scheme, o)
}

// defaultConnectionStringOpener creates an URLOpener with ConnectionString
// initialized (lazily, exactly once) from the environment variable
// SERVICEBUS_CONNECTION_STRING.
type defaultConnectionStringOpener struct {
	init   sync.Once
	opener *URLOpener
	err    error
}

// defaultOpener builds the URLOpener from the environment on first call and
// caches the result (or the error) for subsequent calls.
func (o *defaultConnectionStringOpener) defaultOpener() (*URLOpener, error) {
	o.init.Do(func() {
		cs := os.Getenv("SERVICEBUS_CONNECTION_STRING")
		if cs == "" {
			o.err = errors.New("SERVICEBUS_CONNECTION_STRING environment variable not set")
			return
		}
		o.opener = &URLOpener{
			ConnectionString: cs,
		}
	})
	return o.opener, o.err
}

// OpenTopicURL opens a pubsub.Topic via the environment-derived URLOpener.
func (o *defaultConnectionStringOpener) OpenTopicURL(ctx context.Context, u *url.URL) (*pubsub.Topic, error) {
	opener, err := o.defaultOpener()
	if err != nil {
		return nil, fmt.Errorf("open topic %v: %v", u, err)
	}
	return opener.OpenTopicURL(ctx, u)
}

// OpenSubscriptionURL opens a pubsub.Subscription via the environment-derived URLOpener.
func (o *defaultConnectionStringOpener) OpenSubscriptionURL(ctx context.Context, u *url.URL) (*pubsub.Subscription, error) {
	opener, err := o.defaultOpener()
	if err != nil {
		return nil, fmt.Errorf("open subscription %v: %v", u, err)
	}
	return opener.OpenSubscriptionURL(ctx, u)
}
// Scheme is the URL scheme azurepubsub registers its URLOpeners under on pubsub.DefaultMux.
const Scheme = "azuresb"

// URLOpener opens Azure Service Bus URLs like "azuresb://mytopic" for
// topics or "azuresb://mysubscription?topic=mytopic" for subscriptions.
//
//  - The URL's host+path is used as the topic name.
//  - For subscriptions, the subscription name must be provided in the
//    "subscription" query parameter.
type URLOpener struct {
	// ConnectionString is the Service Bus connection string.
	// https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-dotnet-get-started-with-queues
	ConnectionString string

	// Options passed when creating the ServiceBus Topic/Subscription.
	ServiceBusTopicOptions        []servicebus.TopicOption
	ServiceBusSubscriptionOptions []servicebus.SubscriptionOption

	// TopicOptions specifies the options to pass to OpenTopic.
	TopicOptions TopicOptions
	// SubscriptionOptions specifies the options to pass to OpenSubscription.
	SubscriptionOptions SubscriptionOptions
}

// namespace gets the *servicebus.Namespace using the opener's connection
// string; errors are prefixed with the given kind ("topic"/"subscription") and URL.
func (o *URLOpener) namespace(kind string, u *url.URL) (*servicebus.Namespace, error) {
	if o.ConnectionString == "" {
		return nil, fmt.Errorf("open %s %v: ConnectionString is required", kind, u)
	}
	ns, err := NewNamespaceFromConnectionString(o.ConnectionString)
	if err != nil {
		return nil, fmt.Errorf("open %s %v: invalid connection string %q: %v", kind, u, o.ConnectionString, err)
	}
	return ns, nil
}
// OpenTopicURL opens a pubsub.Topic based on u.
// The topic name is the URL's host+path; no query parameters are accepted.
func (o *URLOpener) OpenTopicURL(ctx context.Context, u *url.URL) (*pubsub.Topic, error) {
	ns, err := o.namespace("topic", u)
	if err != nil {
		return nil, err
	}
	// Reject any query parameter on topic URLs.
	for param := range u.Query() {
		return nil, fmt.Errorf("open topic %q: invalid query parameter %q", u, param)
	}
	topicName := path.Join(u.Host, u.Path)
	t, err := NewTopic(ns, topicName, o.ServiceBusTopicOptions)
	if err != nil {
		return nil, fmt.Errorf("open topic %v: couldn't open topic %q: %v", u, topicName, err)
	}
	return OpenTopic(ctx, t, &o.TopicOptions), nil
}

// OpenSubscriptionURL opens a pubsub.Subscription based on u.
// The topic name is the URL's host+path; the subscription name comes from the
// required "subscription" query parameter, and no other parameter is accepted.
func (o *URLOpener) OpenSubscriptionURL(ctx context.Context, u *url.URL) (*pubsub.Subscription, error) {
	ns, err := o.namespace("subscription", u)
	if err != nil {
		return nil, err
	}
	topicName := path.Join(u.Host, u.Path)
	t, err := NewTopic(ns, topicName, o.ServiceBusTopicOptions)
	if err != nil {
		return nil, fmt.Errorf("open subscription %v: couldn't open topic %q: %v", u, topicName, err)
	}
	q := u.Query()
	subName := q.Get("subscription")
	q.Del("subscription")
	if subName == "" {
		return nil, fmt.Errorf("open subscription %q: missing required query parameter subscription", u)
	}
	for param := range q {
		return nil, fmt.Errorf("open subscription %q: invalid query parameter %q", u, param)
	}
	sub, err := NewSubscription(t, subName, o.ServiceBusSubscriptionOptions)
	if err != nil {
		return nil, fmt.Errorf("open subscription %v: couldn't open subscription %q: %v", u, subName, err)
	}
	return OpenSubscription(ctx, ns, t, sub, &o.SubscriptionOptions), nil
}
// topic implements driver.Topic backed by an Azure Service Bus Topic.
type topic struct {
	sbTopic *servicebus.Topic
}

// TopicOptions provides configuration options for an Azure SB Topic.
type TopicOptions struct{}

// NewNamespaceFromConnectionString returns a *servicebus.Namespace from a Service Bus connection string.
// https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-dotnet-get-started-with-queues
func NewNamespaceFromConnectionString(connectionString string) (*servicebus.Namespace, error) {
	nsOptions := servicebus.NamespaceWithConnectionString(connectionString)
	return servicebus.NewNamespace(nsOptions)
}

// NewTopic returns a *servicebus.Topic associated with a Service Bus Namespace.
func NewTopic(ns *servicebus.Namespace, topicName string, opts []servicebus.TopicOption) (*servicebus.Topic, error) {
	return ns.NewTopic(topicName, opts...)
}

// NewSubscription returns a *servicebus.Subscription associated with a Service Bus Topic.
func NewSubscription(parentTopic *servicebus.Topic, subscriptionName string, opts []servicebus.SubscriptionOption) (*servicebus.Subscription, error) {
	return parentTopic.NewSubscription(subscriptionName, opts...)
}

// OpenTopic initializes a pubsub Topic on a given Service Bus Topic.
func OpenTopic(ctx context.Context, sbTopic *servicebus.Topic, opts *TopicOptions) *pubsub.Topic {
	t := openTopic(ctx, sbTopic)
	return pubsub.NewTopic(t, nil)
}

// openTopic returns the driver for OpenTopic. This function exists so the test
// harness can get the driver interface implementation if it needs to.
func openTopic(ctx context.Context, sbTopic *servicebus.Topic) driver.Topic {
	return &topic{
		sbTopic: sbTopic,
	}
}
// SendBatch implements driver.Topic.SendBatch: each driver message is converted
// to a Service Bus message (body plus metadata) and sent one at a time; the
// first send error aborts the batch.
func (t *topic) SendBatch(ctx context.Context, dms []*driver.Message) error {
	for _, dm := range dms {
		outgoing := servicebus.NewMessage(dm.Body)
		for key, val := range dm.Metadata {
			outgoing.Set(key, val)
		}
		if sendErr := t.sbTopic.Send(ctx, outgoing); sendErr != nil {
			return sendErr
		}
	}
	return nil
}
// IsRetryable implements driver.Topic.IsRetryable.
func (t *topic) IsRetryable(err error) bool {
	// Let the Service Bus SDK recover from any transient connectivity issue.
	return false
}

// As implements driver.Topic.As, exposing the underlying *servicebus.Topic.
func (t *topic) As(i interface{}) bool {
	p, ok := i.(**servicebus.Topic)
	if !ok {
		return false
	}
	*p = t.sbTopic
	return true
}

// ErrorAs implements driver.Topic.ErrorAs
func (*topic) ErrorAs(err error, i interface{}) bool {
	return errorAs(err, i)
}

// errorAs exposes the underlying *amqp.Error or common.Retryable when i is a
// pointer of the matching type; otherwise it reports false.
func errorAs(err error, i interface{}) bool {
	switch v := err.(type) {
	case *amqp.Error:
		if p, ok := i.(**amqp.Error); ok {
			*p = v
			return true
		}
	case common.Retryable:
		if p, ok := i.(*common.Retryable); ok {
			*p = v
			return true
		}
	}
	return false
}

// ErrorCode implements driver.Topic.ErrorCode.
func (*topic) ErrorCode(err error) gcerrors.ErrorCode {
	return errorCode(err)
}
// subscription implements driver.Subscription backed by an Azure Service Bus Subscription.
type subscription struct {
	sbSub *servicebus.Subscription
	opts  *SubscriptionOptions

	topicName string                // Used in driver.subscription.SendAcks to validate credentials before issuing the message complete bulk operation.
	sbNs      *servicebus.Namespace // Used in driver.subscription.SendAcks to validate credentials before issuing the message complete bulk operation.
}

// SubscriptionOptions will contain configuration for subscriptions.
type SubscriptionOptions struct {
	// ListenerTimeout bounds how long each ReceiveBatch call listens for messages.
	ListenerTimeout time.Duration
}

// OpenSubscription initializes a pubsub Subscription on a given Service Bus Subscription and its parent Service Bus Topic.
func OpenSubscription(ctx context.Context, parentNamespace *servicebus.Namespace, parentTopic *servicebus.Topic, sbSubscription *servicebus.Subscription, opts *SubscriptionOptions) *pubsub.Subscription {
	ds := openSubscription(ctx, parentNamespace, parentTopic, sbSubscription, opts)
	return pubsub.NewSubscription(ds, nil)
}

// openSubscription returns a driver.Subscription, falling back to the package
// default listener timeout when opts does not supply a positive one.
func openSubscription(ctx context.Context, sbNs *servicebus.Namespace, sbTop *servicebus.Topic, sbSub *servicebus.Subscription, opts *SubscriptionOptions) driver.Subscription {
	topicName := ""
	if sbTop != nil {
		topicName = sbTop.Name
	}
	defaultTimeout := listenerTimeout
	if opts != nil && opts.ListenerTimeout > 0 {
		defaultTimeout = opts.ListenerTimeout
	}
	return &subscription{
		sbSub:     sbSub,
		topicName: topicName,
		sbNs:      sbNs,
		opts: &SubscriptionOptions{
			ListenerTimeout: defaultTimeout,
		},
	}
}
// testSBSubscription ensures the subscription exists before listening for incoming messages.
// It also validates that the driver was constructed with a topic, namespace and subscription.
func (s *subscription) testSBSubscription(ctx context.Context) error {
	if s.topicName == "" {
		return errors.New("azurepubsub: driver.Subscription requires a Service Bus Topic")
	}
	if s.sbNs == nil {
		return errors.New("azurepubsub: driver.Subscription requires a Service Bus Namespace")
	}
	if s.sbSub == nil {
		return errors.New("azurepubsub: driver.Subscription requires a Service Bus Subscription")
	}
	sm, err := s.sbNs.NewSubscriptionManager(s.topicName)
	if err != nil {
		return err
	}
	// An empty SubscriptionEntity means no Service Bus Subscription exists for the given name.
	// NOTE(review): the Get error is deliberately discarded, so a transient
	// lookup failure is also reported as "no such subscription" — confirm intended.
	se, _ := sm.Get(ctx, s.sbSub.Name)
	if se == nil {
		return fmt.Errorf("azurepubsub: no such subscription %q", s.sbSub.Name)
	}
	return nil
}
// IsRetryable implements driver.Subscription.IsRetryable.
func (s *subscription) IsRetryable(error) bool {
	// Let the Service Bus SDK recover from any transient connectivity issue.
	return false
}

// As implements driver.Subscription.As, exposing the underlying *servicebus.Subscription.
func (s *subscription) As(i interface{}) bool {
	p, ok := i.(**servicebus.Subscription)
	if !ok {
		return false
	}
	*p = s.sbSub
	return true
}

// ErrorAs implements driver.Subscription.ErrorAs
func (s *subscription) ErrorAs(err error, i interface{}) bool {
	return errorAs(err, i)
}

// ErrorCode implements driver.Subscription.ErrorCode.
func (s *subscription) ErrorCode(err error) gcerrors.ErrorCode {
	return errorCode(err)
}
// ReceiveBatch implements driver.Subscription.ReceiveBatch.
// It listens for up to maxMessages (or until the listener timeout elapses) and
// returns whatever has been collected.
func (s *subscription) ReceiveBatch(ctx context.Context, maxMessages int) ([]*driver.Message, error) {
	// Test to ensure existence of the Service Bus Subscription before listening for messages.
	// Listening on a non-existence Service Bus Subscription does not fail. This check is also needed for conformance tests which
	// requires this scenario to fail on ReceiveBatch.
	err := s.testSBSubscription(ctx)
	if err != nil {
		return nil, err
	}
	rctx, cancel := context.WithTimeout(ctx, s.opts.ListenerTimeout)
	defer cancel()
	var messages []*driver.Message
	// Receive blocks until rctx is done, and the handler cancels rctx once
	// maxMessages have been collected, so it can run synchronously. The
	// original spawned a goroutine, waited on a one-case select over
	// rctx.Done(), and joined via a WaitGroup — equivalent, but it also made
	// an early-failing Receive wait out the full listener timeout.
	s.sbSub.Receive(rctx, servicebus.HandlerFunc(func(innerctx context.Context, sbmsg *servicebus.Message) error {
		metadata := map[string]string{}
		sbmsg.ForeachKey(func(k, v string) error {
			metadata[k] = v
			return nil
		})
		messages = append(messages, &driver.Message{
			Body:     sbmsg.Data,
			Metadata: metadata,
			AckID:    sbmsg.LockToken,
			AsFunc:   messageAsFunc(sbmsg),
		})
		if len(messages) >= maxMessages {
			cancel()
		}
		return nil
	}))
	return messages, nil
}
// messageAsFunc builds the driver.Message.AsFunc closure that exposes the
// underlying *servicebus.Message when the caller passes a **servicebus.Message.
func messageAsFunc(sbmsg *servicebus.Message) func(interface{}) bool {
	return func(target interface{}) bool {
		if p, ok := target.(**servicebus.Message); ok {
			*p = sbmsg
			return true
		}
		return false
	}
}
// SendAcks implements driver.Subscription.SendAcks.
// IMPORTANT: This is a workaround to issue 'completed' message dispositions in bulk which is not supported in the Service Bus SDK.
// It dials the namespace over AMQP directly, negotiates a CBS claim for the
// subscription, and issues a single update-disposition RPC for all lock tokens.
func (s *subscription) SendAcks(ctx context.Context, ids []driver.AckID) error {
	if len(ids) == 0 {
		return nil
	}
	host := fmt.Sprintf("amqps://%s.%s/", s.sbNs.Name, s.sbNs.Environment.ServiceBusEndpointSuffix)
	client, err := amqp.Dial(host,
		amqp.ConnSASLAnonymous(),
		amqp.ConnProperty("product", "Go-Cloud Client"),
		amqp.ConnProperty("version", servicebus.Version),
		amqp.ConnProperty("platform", runtime.GOOS),
		amqp.ConnProperty("framework", runtime.Version()),
		amqp.ConnProperty("user-agent", useragent.AzureUserAgentPrefix("pubsub")),
	)
	if err != nil {
		return err
	}
	defer client.Close()

	entityPath := s.topicName + "/Subscriptions/" + s.sbSub.Name
	audience := host + entityPath
	// BUG FIX: the original returned nil here, silently swallowing claim
	// negotiation failures; propagate the error instead.
	if err := cbs.NegotiateClaim(ctx, audience, client, s.sbNs.TokenProvider); err != nil {
		return err
	}

	// Collect the AMQP lock tokens for the messages being completed; non-UUID
	// ack IDs are skipped.
	lockIds := []amqp.UUID{}
	for _, mid := range ids {
		if id, ok := mid.(*uuid.UUID); ok {
			lockTokenBytes := [16]byte(*id)
			lockIds = append(lockIds, amqp.UUID(lockTokenBytes))
		}
	}
	value := map[string]interface{}{
		"disposition-status": completedStatus,
		"lock-tokens":        lockIds,
	}
	msg := &amqp.Message{
		ApplicationProperties: map[string]interface{}{
			"operation": "com.microsoft:update-disposition",
		},
		Value: value,
	}
	link, err := rpc.NewLink(client, s.sbSub.ManagementPath())
	if err != nil {
		return err
	}
	_, err = link.RetryableRPC(ctx, rpcTries, rpcRetryDelay, msg)
	return err
}
// errorCode maps AMQP error conditions onto gcerrors codes; non-AMQP and
// unrecognized errors map to gcerrors.Unknown.
func errorCode(err error) gcerrors.ErrorCode {
	aerr, ok := err.(*amqp.Error)
	if !ok {
		return gcerrors.Unknown
	}
	switch aerr.Condition {
	case amqp.ErrorCondition(servicebus.ErrorNotFound):
		return gcerrors.NotFound
	case amqp.ErrorCondition(servicebus.ErrorPreconditionFailed):
		return gcerrors.FailedPrecondition
	case amqp.ErrorCondition(servicebus.ErrorInternalError):
		return gcerrors.Internal
	case amqp.ErrorCondition(servicebus.ErrorNotImplemented):
		return gcerrors.Unimplemented
	case amqp.ErrorCondition(servicebus.ErrorUnauthorizedAccess), amqp.ErrorCondition(servicebus.ErrorNotAllowed):
		return gcerrors.PermissionDenied
	case amqp.ErrorCondition(servicebus.ErrorResourceLimitExceeded):
		return gcerrors.ResourceExhausted
	case amqp.ErrorCondition(servicebus.ErrorInvalidField):
		return gcerrors.InvalidArgument
	default:
		return gcerrors.Unknown
	}
}

// AckFunc implements driver.Subscription.AckFunc.
func (*subscription) AckFunc() func() { return nil }
| 1 | 15,572 | Probably in another PR, I think the package should be renamed to `servicebus` to be consistent. | google-go-cloud | go |
@@ -557,7 +557,7 @@ namespace AutoRest.Core.Properties {
}
/// <summary>
- /// Looks up a localized string similar to A swagger must have security definitions and must adhere to the specific structure..
+ /// Looks up a localized string similar to Every swagger/configuration must have a security definitions section and it must adhere to the structure described in: https://github.com/Azure/autorest/tree/master/docs/developer/validation-rules/security-definitions-structure-validation.md.
/// </summary>
public static string SecurityDefinitionsStructureValidation {
get { | 1 | //------------------------------------------------------------------------------
// <auto-generated>
// This code was generated by a tool.
// Runtime Version:4.0.30319.42000
//
// Changes to this file may cause incorrect behavior and will be lost if
// the code is regenerated.
// </auto-generated>
//------------------------------------------------------------------------------
namespace AutoRest.Core.Properties {
using System;
using System.Reflection;
/// <summary>
/// A strongly-typed resource class, for looking up localized strings, etc.
/// </summary>
// This class was auto-generated by the StronglyTypedResourceBuilder
// class via a tool like ResGen or Visual Studio.
// To add or remove a member, edit your .ResX file then rerun ResGen
// with the /str option, or rebuild your VS project.
[global::System.CodeDom.Compiler.GeneratedCodeAttribute("System.Resources.Tools.StronglyTypedResourceBuilder", "4.0.0.0")]
[global::System.Diagnostics.DebuggerNonUserCodeAttribute()]
[global::System.Runtime.CompilerServices.CompilerGeneratedAttribute()]
public class Resources {
private static global::System.Resources.ResourceManager resourceMan;
private static global::System.Globalization.CultureInfo resourceCulture;
[global::System.Diagnostics.CodeAnalysis.SuppressMessageAttribute("Microsoft.Performance", "CA1811:AvoidUncalledPrivateCode")]
internal Resources() {
}
/// <summary>
/// Returns the cached ResourceManager instance used by this class.
/// </summary>
[global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)]
public static global::System.Resources.ResourceManager ResourceManager {
get {
if (object.ReferenceEquals(resourceMan, null)) {
global::System.Resources.ResourceManager temp = new global::System.Resources.ResourceManager("AutoRest.Core.Properties.Resources", typeof(Resources).GetTypeInfo().Assembly);
resourceMan = temp;
}
return resourceMan;
}
}
/// <summary>
/// Overrides the current thread's CurrentUICulture property for all
/// resource lookups using this strongly typed resource class.
/// </summary>
[global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)]
public static global::System.Globalization.CultureInfo Culture {
get {
return resourceCulture;
}
set {
resourceCulture = value;
}
}
/// <summary>
/// Looks up a localized string similar to Top level properties should be one of name, type, id, location, properties, tags, plan, sku, etag, managedBy, identity. Model definition '{0}' has extra properties ['{1}']..
/// </summary>
public static string AllowedTopLevelProperties {
get {
return ResourceManager.GetString("AllowedTopLevelProperties", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Inline/anonymous models must not be used, instead define a schema with a model name in the "definitions" section and refer to it. This allows operations to share the models..
/// </summary>
public static string AnonymousTypesDiscouraged {
get {
return ResourceManager.GetString("AnonymousTypesDiscouraged", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to API Version must be in the format: yyyy-MM-dd, optionally followed by -preview, -alpha, -beta, -rc, -privatepreview..
/// </summary>
public static string APIVersionFormatIsNotValid {
get {
return ResourceManager.GetString("APIVersionFormatIsNotValid", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Top level property names should not be repeated inside the properties bag for ARM resource '{0}'. Properties [{1}] conflict with ARM top level properties. Please rename these..
/// </summary>
public static string ArmPropertiesBagValidationMessage {
get {
return ResourceManager.GetString("ArmPropertiesBagValidationMessage", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to AutoRest Core {0}.
/// </summary>
public static string AutoRestCore {
get {
return ResourceManager.GetString("AutoRestCore", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Property named: "{0}", must follow camelCase style. Example: "{1}"..
/// </summary>
public static string BodyPropertyNameCamelCase {
get {
return ResourceManager.GetString("BodyPropertyNameCamelCase", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Booleans are not descriptive and make them hard to use. Instead use string enums with allowed set of values defined..
/// </summary>
public static string BooleanPropertyNotRecommended {
get {
return ResourceManager.GetString("BooleanPropertyNotRecommended", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Collection object {0} returned by list operation {1} with 'x-ms-pageable' extension, has no property named 'value'..
/// </summary>
public static string CollectionObjectPropertiesNamingMessage {
get {
return ResourceManager.GetString("CollectionObjectPropertiesNamingMessage", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Plugins:
/// CSharp:
/// TypeName: PluginCs, AutoRest.CSharp
/// Azure.CSharp:
/// TypeName: PluginCsa, AutoRest.CSharp.Azure
/// Azure.CSharp.Fluent:
/// TypeName: PluginCsaf, AutoRest.CSharp.Azure.Fluent
/// Ruby:
/// TypeName: PluginRb, AutoRest.Ruby
/// Azure.Ruby:
/// TypeName: PluginRba, AutoRest.Ruby.Azure
/// NodeJS:
/// TypeName: PluginJs, AutoRest.NodeJS
/// Azure.NodeJS:
/// TypeName: PluginJsa, AutoRest.NodeJS.Azure
/// Python:
/// TypeName: PluginPy, AutoRest.Python
/// Azure.Python:
/// TypeNa [rest of string was truncated]";.
/// </summary>
public static string ConfigurationKnownPlugins {
get {
return ResourceManager.GetString("ConfigurationKnownPlugins", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Property named: "{0}", for definition: "{1}" must follow camelCase style. Example: "{2}"..
/// </summary>
public static string DefinitionsPropertiesNameCamelCase {
get {
return ResourceManager.GetString("DefinitionsPropertiesNameCamelCase", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to 'Delete' operation must not have a request body..
/// </summary>
public static string DeleteMustNotHaveRequestBody {
get {
return ResourceManager.GetString("DeleteMustNotHaveRequestBody", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to 'DELETE' operation '{0}' must use method name 'Delete'..
/// </summary>
public static string DeleteOperationNameNotValid {
get {
return ResourceManager.GetString("DeleteOperationNameNotValid", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to The value provided for description is not descriptive enough. Accurate and descriptive description is essential for maintaining reference documentation..
/// </summary>
public static string DescriptionNotDescriptive {
get {
return ResourceManager.GetString("DescriptionNotDescriptive", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Empty x-ms-client-name property..
/// </summary>
public static string EmptyClientName {
get {
return ResourceManager.GetString("EmptyClientName", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Error generating client model: {0}.
/// </summary>
public static string ErrorGeneratingClientModel {
get {
return ResourceManager.GetString("ErrorGeneratingClientModel", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Error loading {0} assembly: {1}.
/// </summary>
public static string ErrorLoadingAssembly {
get {
return ResourceManager.GetString("ErrorLoadingAssembly", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Error saving generated code: {0}.
/// </summary>
public static string ErrorSavingGeneratedCode {
get {
return ResourceManager.GetString("ErrorSavingGeneratedCode", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Plugin {0} not found.
/// </summary>
public static string ExtensionNotFound {
get {
return ResourceManager.GetString("ExtensionNotFound", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Successfully initialized {0} Code Generator {1}.
/// </summary>
public static string GeneratorInitialized {
get {
return ResourceManager.GetString("GeneratorInitialized", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to 'GET' operation '{0}' must use method name 'Get' or Method name start with 'List'.
/// </summary>
public static string GetOperationNameNotValid {
get {
return ResourceManager.GetString("GetOperationNameNotValid", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Guid used at the #/Definitions/{1}/.../{0}. Usage of Guid is not recommanded. If GUIDs are absolutely required in your service, please get sign off from the Azure API review board..
/// </summary>
public static string GuidUsageNotRecommended {
get {
return ResourceManager.GetString("GuidUsageNotRecommended", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Permissible values for HTTP Verb are delete,get,put,patch,head,options,post. .
/// </summary>
public static string HttpVerbIsNotValid {
get {
return ResourceManager.GetString("HttpVerbIsNotValid", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Initializing code generator..
/// </summary>
public static string InitializingCodeGenerator {
get {
return ResourceManager.GetString("InitializingCodeGenerator", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Initializing modeler..
/// </summary>
public static string InitializingModeler {
get {
return ResourceManager.GetString("InitializingModeler", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to The default value is not one of the values enumerated as valid for this element..
/// </summary>
public static string InvalidDefault {
get {
return ResourceManager.GetString("InvalidDefault", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Property name {0} cannot be used as an Identifier, as it contains only invalid characters..
/// </summary>
public static string InvalidIdentifierName {
get {
return ResourceManager.GetString("InvalidIdentifierName", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to When property is modeled as "readOnly": true then x-ms-mutability extension can only have "read" value. When property is modeled as "readOnly": false then applying x-ms-mutability extension with only "read" value is not allowed. Extension contains invalid values: '{0}'..
/// </summary>
public static string InvalidMutabilityValueForReadOnly {
get {
return ResourceManager.GetString("InvalidMutabilityValueForReadOnly", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to '{0}' code generator does not support code generation to a single file..
/// </summary>
public static string LanguageDoesNotSupportSingleFileGeneration {
get {
return ResourceManager.GetString("LanguageDoesNotSupportSingleFileGeneration", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Since operation '{0}' response has model definition '{1}', it should be of the form "*_list*".
/// </summary>
public static string ListOperationsNamingWarningMessage {
get {
return ResourceManager.GetString("ListOperationsNamingWarningMessage", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to A '{0}' operation '{1}' with x-ms-long-running-operation extension must have a valid terminal success status code {2}..
/// </summary>
public static string LongRunningResponseNotValid {
get {
return ResourceManager.GetString("LongRunningResponseNotValid", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to {0} lacks 'description' property. Consider adding a 'description' element. Accurate description is essential for maintaining reference documentation..
/// </summary>
public static string MissingDescription {
get {
return ResourceManager.GetString("MissingDescription", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Successfully initialized modeler {0} v {1}..
/// </summary>
public static string ModelerInitialized {
get {
return ResourceManager.GetString("ModelerInitialized", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to For better generated code quality, remove all references to "msdn.microsoft.com"..
/// </summary>
public static string MsdnReferencesDiscouraged {
get {
return ResourceManager.GetString("MsdnReferencesDiscouraged", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to {0} (already used in {1}).
/// </summary>
public static string NamespaceConflictReasonMessage {
get {
return ResourceManager.GetString("NamespaceConflictReasonMessage", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Please make sure that media types other than 'application/json' are supported by your service..
/// </summary>
public static string NonAppJsonTypeNotSupported {
get {
return ResourceManager.GetString("NonAppJsonTypeNotSupported", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Only 1 underscore is permitted in the operation id, following Noun_Verb conventions..
/// </summary>
public static string OnlyOneUnderscoreAllowedInOperationId {
get {
return ResourceManager.GetString("OnlyOneUnderscoreAllowedInOperationId", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to OperationId is required for all operations. Please add it for '{0}' operation of '{1}' path..
/// </summary>
public static string OperationIdMissing {
get {
return ResourceManager.GetString("OperationIdMissing", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to OperationId has a noun that conflicts with one of the model names in definitions section. The model name will be disambiguated to '{0}Model'. Consider using the plural form of '{1}' to avoid this..
/// </summary>
public static string OperationIdNounConflictingModelNamesMessage {
get {
return ResourceManager.GetString("OperationIdNounConflictingModelNamesMessage", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Per the Noun_Verb convention for Operation Ids, the noun '{0}' should not appear after the underscore..
/// </summary>
public static string OperationIdNounInVerb {
get {
return ResourceManager.GetString("OperationIdNounInVerb", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Parameter "subscriptionId" is not allowed in the operations section, define it in the global parameters section instead.
/// </summary>
public static string OperationParametersNotAllowedMessage {
get {
return ResourceManager.GetString("OperationParametersNotAllowedMessage", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Operations API must be implemented for '{0}'..
/// </summary>
public static string OperationsAPINotImplemented {
get {
return ResourceManager.GetString("OperationsAPINotImplemented", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Parameter Must have the "name" property defined with non-empty string as its value.
/// </summary>
public static string ParametersPropertiesValidation {
get {
return ResourceManager.GetString("ParametersPropertiesValidation", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Parameter '{0}' is required..
/// </summary>
public static string ParameterValueIsMissing {
get {
return ResourceManager.GetString("ParameterValueIsMissing", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Parameter '{0}' value is not valid. Expect '{1}'.
/// </summary>
public static string ParameterValueIsNotValid {
get {
return ResourceManager.GetString("ParameterValueIsNotValid", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to 'PATCH' operation '{0}' must use method name 'Update'..
/// </summary>
public static string PatchOperationNameNotValid {
get {
return ResourceManager.GetString("PatchOperationNameNotValid", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to path cannot be null or an empty string or a string with white spaces while getting the parent directory.
/// </summary>
public static string PathCannotBeNullOrEmpty {
get {
return ResourceManager.GetString("PathCannotBeNullOrEmpty", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to {0} has different responses for PUT/GET/PATCH operations. The PUT/GET/PATCH operations must have same schema response..
/// </summary>
public static string PutGetPatchResponseInvalid {
get {
return ResourceManager.GetString("PutGetPatchResponseInvalid", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to 'PUT' operation '{0}' must use method name 'Create'..
/// </summary>
public static string PutOperationNameNotValid {
get {
return ResourceManager.GetString("PutOperationNameNotValid", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to A PUT operation request body schema should be the same as its 200 response schema, to allow reusing the same entity between GET and PUT. If the schema of the PUT request body is a superset of the GET response body, make sure you have a PATCH operation to make the resource updatable. Operation: '{0}' Request Model: '{1}' Response Model: '{2}'.
/// </summary>
public static string PutOperationRequestResponseSchemaMessage {
get {
return ResourceManager.GetString("PutOperationRequestResponseSchemaMessage", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to The 200 response model for an ARM PUT operation must have x-ms-azure-resource extension set to true in its hierarchy. Operation: '{0}' Model: '{1}'..
/// </summary>
public static string PutOperationResourceResponseValidationMessage {
get {
return ResourceManager.GetString("PutOperationResourceResponseValidationMessage", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Property '{0}' is a required property. It should not be marked as 'readonly'..
/// </summary>
public static string RequiredReadOnlyPropertiesValidation {
get {
return ResourceManager.GetString("RequiredReadOnlyPropertiesValidation", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to A 'Resource' definition must have x-ms-azure-resource extension enabled and set to true..
/// </summary>
public static string ResourceIsMsResourceNotValid {
get {
return ResourceManager.GetString("ResourceIsMsResourceNotValid", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Model definition '{0}' must have the properties 'name', 'id' and 'type' in its hierarchy and these properties must be marked as readonly..
/// </summary>
public static string ResourceModelIsNotValid {
get {
return ResourceManager.GetString("ResourceModelIsNotValid", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to A swagger must have security definitions and must adhere to the specific structure..
/// </summary>
public static string SecurityDefinitionsStructureValidation {
get {
return ResourceManager.GetString("SecurityDefinitionsStructureValidation", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Parameter "{0}" is referenced but not defined in the global parameters section of Service Definition.
/// </summary>
public static string ServiceDefinitionParametersMissingMessage {
get {
return ResourceManager.GetString("ServiceDefinitionParametersMissingMessage", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Sku Model is not valid. A Sku model must have 'name' property. It can also have 'tier', 'size', 'family', 'capacity' as optional properties..
/// </summary>
public static string SkuModelIsNotValid {
get {
return ResourceManager.GetString("SkuModelIsNotValid", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Azure Resource Management only supports HTTPS scheme..
/// </summary>
public static string SupportedSchemesWarningMessage {
get {
return ResourceManager.GetString("SupportedSchemesWarningMessage", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Tracked resource '{0}' must have a get operation..
/// </summary>
public static string TrackedResourceGetOperationMissing {
get {
return ResourceManager.GetString("TrackedResourceGetOperationMissing", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to The child tracked resource, '{0}' with immediate parent '{1}', must have a list by immediate parent operation..
/// </summary>
public static string TrackedResourceListByImmediateParentOperationMissing {
get {
return ResourceManager.GetString("TrackedResourceListByImmediateParentOperationMissing", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to The tracked resource, '{0}', must have a list by resource group operation..
/// </summary>
public static string TrackedResourceListByResourceGroupOperationMissing {
get {
return ResourceManager.GetString("TrackedResourceListByResourceGroupOperationMissing", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to The tracked resource, '{0}', must have a list by subscriptions operation..
/// </summary>
public static string TrackedResourceListBySubscriptionsOperationMissing {
get {
return ResourceManager.GetString("TrackedResourceListBySubscriptionsOperationMissing", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Tracked resource '{0}' must have patch operation that at least supports the update of tags..
/// </summary>
public static string TrackedResourcePatchOperationMissing {
get {
return ResourceManager.GetString("TrackedResourcePatchOperationMissing", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Type '{0}' name should be assembly qualified. For example 'ClassName, AssemblyName'.
/// </summary>
public static string TypeShouldBeAssemblyQualified {
get {
return ResourceManager.GetString("TypeShouldBeAssemblyQualified", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Multiple resource providers are not allowed in a single spec. More than one the resource paths were found: '{0}'..
/// </summary>
public static string UniqueResourcePaths {
get {
return ResourceManager.GetString("UniqueResourcePaths", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to '{0}' is not a known format..
/// </summary>
public static string UnknownFormat {
get {
return ResourceManager.GetString("UnknownFormat", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Value of 'x-ms-client-name' cannot be the same as '{0}' Property/Model..
/// </summary>
public static string XmsClientNameInvalid {
get {
return ResourceManager.GetString("XmsClientNameInvalid", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Paths in x-ms-paths must overload a normal path in the paths section, i.e. a path in the x-ms-paths must either be same as a path in the paths section or a path in the paths sections followed by additional parameters..
/// </summary>
public static string XMSPathBaseNotInPaths {
get {
return ResourceManager.GetString("XMSPathBaseNotInPaths", resourceCulture);
}
}
}
}
| 1 | 24,771 | curious: are we planning to add link to each of the rules in the message? | Azure-autorest | java |
@@ -248,6 +248,11 @@ func (exp *MockExplorer) GetDeposits(subChainID int64, offset int64, limit int64
return nil, nil
}
+// SettleDeposit settles deposit on sub-chain
+func (exp *MockExplorer) SettleDeposit(req explorer.SettleDepositRequest) (res explorer.SettleDepositResponse, err error) {
+ return explorer.SettleDepositResponse{}, nil
+}
+
func randInt64() int64 {
rand.Seed(time.Now().UnixNano())
amount := int64(0) | 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package explorer
import (
"math/rand"
"strconv"
"time"
"github.com/iotexproject/iotex-core/explorer/idl/explorer"
)
// MockExplorer return an explorer for test purpose
type MockExplorer struct {
}
// GetBlockchainHeight returns the blockchain height
func (exp *MockExplorer) GetBlockchainHeight() (int64, error) {
return randInt64(), nil
}
// GetAddressBalance returns the balance of an address
func (exp *MockExplorer) GetAddressBalance(address string) (string, error) {
return randString(), nil
}
// GetAddressDetails returns the properties of an address
func (exp *MockExplorer) GetAddressDetails(address string) (explorer.AddressDetails, error) {
return explorer.AddressDetails{
Address: address,
TotalBalance: randString(),
}, nil
}
// GetLastTransfersByRange return transfers in [-(offset+limit-1), -offset] from block
// with height startBlockHeight
func (exp *MockExplorer) GetLastTransfersByRange(startBlockHeight int64, offset int64, limit int64, showCoinBase bool) ([]explorer.Transfer, error) {
var txs []explorer.Transfer
for i := int64(0); i < limit; i++ {
txs = append(txs, randTransaction())
}
return txs, nil
}
// GetTransferByID returns transfer by transfer id
func (exp *MockExplorer) GetTransferByID(transferID string) (explorer.Transfer, error) {
return randTransaction(), nil
}
// GetTransfersByAddress returns all transfers associate with an address
func (exp *MockExplorer) GetTransfersByAddress(address string, offset int64, limit int64) ([]explorer.Transfer, error) {
return exp.GetLastTransfersByRange(0, offset, limit, true)
}
// GetUnconfirmedTransfersByAddress returns all unconfirmed transfers in actpool associated with an address
func (exp *MockExplorer) GetUnconfirmedTransfersByAddress(address string, offset int64, limit int64) ([]explorer.Transfer, error) {
return exp.GetLastTransfersByRange(0, offset, limit, true)
}
// GetTransfersByBlockID returns transfers in a block
func (exp *MockExplorer) GetTransfersByBlockID(blockID string, offset int64, limit int64) ([]explorer.Transfer, error) {
return exp.GetLastTransfersByRange(0, offset, limit, true)
}
// GetLastVotesByRange return votes in [-(offset+limit-1), -offset] from block
// with height startBlockHeight
func (exp *MockExplorer) GetLastVotesByRange(startBlockHeight int64, offset int64, limit int64) ([]explorer.Vote, error) {
var votes []explorer.Vote
for i := int64(0); i < limit; i++ {
votes = append(votes, randVote())
}
return votes, nil
}
// GetVoteByID returns vote by vote id
func (exp *MockExplorer) GetVoteByID(voteID string) (explorer.Vote, error) {
return randVote(), nil
}
// GetVotesByAddress returns all votes associate with an address
func (exp *MockExplorer) GetVotesByAddress(address string, offset int64, limit int64) ([]explorer.Vote, error) {
return exp.GetLastVotesByRange(0, offset, limit)
}
// GetUnconfirmedVotesByAddress returns all unconfirmed votes in actpool associated with an address
func (exp *MockExplorer) GetUnconfirmedVotesByAddress(address string, offset int64, limit int64) ([]explorer.Vote, error) {
return exp.GetLastVotesByRange(0, offset, limit)
}
// GetVotesByBlockID returns votes in a block
func (exp *MockExplorer) GetVotesByBlockID(blkID string, offset int64, limit int64) ([]explorer.Vote, error) {
return exp.GetLastVotesByRange(0, offset, limit)
}
// GetReceiptByExecutionID gets receipt with corresponding execution id
func (exp *MockExplorer) GetReceiptByExecutionID(id string) (explorer.Receipt, error) {
return explorer.Receipt{}, nil
}
// GetLastExecutionsByRange return executions in [-(offset+limit-1), -offset] from block
// with height startBlockHeight
func (exp *MockExplorer) GetLastExecutionsByRange(startBlockHeight int64, offset int64, limit int64) ([]explorer.Execution, error) {
var executions []explorer.Execution
for i := int64(0); i < limit; i++ {
executions = append(executions, randExecution())
}
return executions, nil
}
// GetExecutionByID returns execution by execution id
func (exp *MockExplorer) GetExecutionByID(executionID string) (explorer.Execution, error) {
return randExecution(), nil
}
// GetExecutionsByAddress returns all executions associate with an address
func (exp *MockExplorer) GetExecutionsByAddress(address string, offset int64, limit int64) ([]explorer.Execution, error) {
return exp.GetLastExecutionsByRange(0, offset, limit)
}
// GetUnconfirmedExecutionsByAddress returns all unconfirmed executions in actpool associated with an address
func (exp *MockExplorer) GetUnconfirmedExecutionsByAddress(address string, offset int64, limit int64) ([]explorer.Execution, error) {
return exp.GetLastExecutionsByRange(0, offset, limit)
}
// GetExecutionsByBlockID returns executions in a block
func (exp *MockExplorer) GetExecutionsByBlockID(blkID string, offset int64, limit int64) ([]explorer.Execution, error) {
return exp.GetLastExecutionsByRange(0, offset, limit)
}
// GetLastBlocksByRange get block with height [offset-limit+1, offset]
func (exp *MockExplorer) GetLastBlocksByRange(offset int64, limit int64) ([]explorer.Block, error) {
var blks []explorer.Block
for i := int64(0); i < limit; i++ {
blks = append(blks, randBlock())
}
return blks, nil
}
// GetBlockByID returns block by block id
func (exp *MockExplorer) GetBlockByID(blkID string) (explorer.Block, error) {
return randBlock(), nil
}
// GetCoinStatistic returns stats in blockchain
func (exp *MockExplorer) GetCoinStatistic() (explorer.CoinStatistic, error) {
return explorer.CoinStatistic{
Height: randInt64(),
Supply: randString(),
}, nil
}
// GetConsensusMetrics returns the fake consensus metrics
func (exp *MockExplorer) GetConsensusMetrics() (explorer.ConsensusMetrics, error) {
delegates := []string{
randString(),
randString(),
randString(),
randString(),
}
return explorer.ConsensusMetrics{
LatestEpoch: randInt64(),
LatestDelegates: delegates,
LatestBlockProducer: delegates[0],
}, nil
}
// GetCandidateMetrics returns the fake delegates metrics
func (exp *MockExplorer) GetCandidateMetrics() (explorer.CandidateMetrics, error) {
candidate := explorer.Candidate{
Address: randString(),
TotalVote: randString(),
CreationHeight: randInt64(),
LastUpdateHeight: randInt64(),
IsDelegate: false,
IsProducer: false,
}
return explorer.CandidateMetrics{
Candidates: []explorer.Candidate{candidate},
}, nil
}
// GetCandidateMetricsByHeight returns the fake delegates metrics
func (exp *MockExplorer) GetCandidateMetricsByHeight(h int64) (explorer.CandidateMetrics, error) {
candidate := explorer.Candidate{
Address: randString(),
TotalVote: randString(),
CreationHeight: randInt64(),
LastUpdateHeight: randInt64(),
IsDelegate: false,
IsProducer: false,
}
return explorer.CandidateMetrics{
Candidates: []explorer.Candidate{candidate},
}, nil
}
// SendTransfer sends a fake transfer; the request is ignored and an
// empty response with a nil error is returned.
func (exp *MockExplorer) SendTransfer(request explorer.SendTransferRequest) (explorer.SendTransferResponse, error) {
	return explorer.SendTransferResponse{}, nil
}

// SendVote sends a fake vote; the request is ignored.
func (exp *MockExplorer) SendVote(request explorer.SendVoteRequest) (explorer.SendVoteResponse, error) {
	return explorer.SendVoteResponse{}, nil
}

// PutSubChainBlock makes a fake put block request; the payload is ignored.
func (exp *MockExplorer) PutSubChainBlock(putBlockJSON explorer.PutSubChainBlockRequest) (resp explorer.PutSubChainBlockResponse, err error) {
	return explorer.PutSubChainBlockResponse{}, nil
}

// SendAction makes a fake send action request; the payload is ignored.
func (exp *MockExplorer) SendAction(req explorer.SendActionRequest) (resp explorer.SendActionResponse, err error) {
	return explorer.SendActionResponse{}, nil
}

// GetPeers returns an empty GetPeersResponse.
func (exp *MockExplorer) GetPeers() (explorer.GetPeersResponse, error) {
	return explorer.GetPeersResponse{}, nil
}

// SendSmartContract sends a smart contract; the execution is ignored.
func (exp *MockExplorer) SendSmartContract(request explorer.Execution) (explorer.SendSmartContractResponse, error) {
	return explorer.SendSmartContractResponse{}, nil
}

// ReadExecutionState reads a smart contract's execution state; this mock
// always returns the fixed value "100".
func (exp *MockExplorer) ReadExecutionState(request explorer.Execution) (string, error) {
	return "100", nil
}

// GetBlockOrActionByHash gets block or action by a hash; always empty here.
func (exp *MockExplorer) GetBlockOrActionByHash(hash string) (explorer.GetBlkOrActResponse, error) {
	return explorer.GetBlkOrActResponse{}, nil
}

// CreateDeposit deposits the balance from main-chain to sub-chain; no-op mock.
func (exp *MockExplorer) CreateDeposit(req explorer.CreateDepositRequest) (explorer.CreateDepositResponse, error) {
	return explorer.CreateDepositResponse{}, nil
}

// GetDeposits returns the deposits of a sub-chain in the given range;
// this mock returns a nil slice and a nil error.
func (exp *MockExplorer) GetDeposits(subChainID int64, offset int64, limit int64) ([]explorer.Deposit, error) {
	return nil, nil
}
// randInt64 returns a pseudo-random int64 in the range [1, 99999999].
//
// NOTE(review): reseeding the global PRNG on every call is redundant and,
// for calls landing in the same nanosecond, can repeat sequences; seeding
// once at package init would be preferable — kept here to avoid changing
// observable behavior of existing tests.
func randInt64() int64 {
	rand.Seed(time.Now().UnixNano())
	// Intn(99999999) yields [0, 99999998]; adding 1 gives the same
	// non-zero range as the original rejection loop, without looping.
	return int64(rand.Intn(99999999)) + 1
}
// randString returns the base-10 decimal representation of a random
// non-zero int64 (see randInt64).
func randString() string {
	return strconv.FormatInt(randInt64(), 10)
}

// randTransaction builds a Transfer whose fields are all random
// placeholder values.
func randTransaction() explorer.Transfer {
	return explorer.Transfer{
		ID:        randString(),
		Sender:    randString(),
		Recipient: randString(),
		Amount:    randString(),
		Fee:       randString(),
		Timestamp: randInt64(),
		BlockID:   randString(),
	}
}

// randVote builds a Vote whose fields are all random placeholder values.
func randVote() explorer.Vote {
	return explorer.Vote{
		ID:        randString(),
		Timestamp: randInt64(),
		BlockID:   randString(),
		Nonce:     randInt64(),
		Voter:     randString(),
		Votee:     randString(),
	}
}

// randExecution builds an Execution whose fields are all random
// placeholder values.
func randExecution() explorer.Execution {
	return explorer.Execution{
		ID:        randString(),
		Timestamp: randInt64(),
		BlockID:   randString(),
		Nonce:     randInt64(),
		Executor:  randString(),
		Contract:  randString(),
		Amount:    randString(),
		GasLimit:  randInt64(),
		GasPrice:  randString(),
	}
}

// randBlock builds a Block with a random generator and random
// height/timestamp/transfer-count/amount fields.
func randBlock() explorer.Block {
	return explorer.Block{
		ID:        randString(),
		Height:    randInt64(),
		Timestamp: randInt64(),
		Transfers: randInt64(),
		GenerateBy: explorer.BlockGenerator{
			Name:    randString(),
			Address: randString(),
		},
		Amount: randString(),
		Forged: randInt64(),
	}
}
| 1 | 13,234 | line is 123 characters | iotexproject-iotex-core | go |
@@ -69,7 +69,7 @@ class AuthorizationService extends AbstractAuthenticationService
* whether a user is authenticated.
*
* @see \TYPO3\CMS\Core\Authentication\AbstractUserAuthentication::checkAuthentication()
- * @param array Array of user data
+ * @param array $user of user data
* @return int Returns 200 to grant access for the page indexer.
*/
public function authUser($user) | 1 | <?php
namespace ApacheSolrForTypo3\Solr\IndexQueue\FrontendHelper;
/***************************************************************
* Copyright notice
*
* (c) 2011-2015 Ingo Renner <[email protected]>
* All rights reserved
*
* This script is part of the TYPO3 project. The TYPO3 project is
* free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* The GNU General Public License can be found at
* http://www.gnu.org/copyleft/gpl.html.
*
* This script is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* This copyright notice MUST APPEAR in all copies of the script!
***************************************************************/
use TYPO3\CMS\Core\Utility\GeneralUtility;
use TYPO3\CMS\Sv\AbstractAuthenticationService;
/**
 * Authentication service to authorize the Index Queue page indexer to access
 * protected pages.
 *
 * @author Ingo Renner <[email protected]>
 */
class AuthorizationService extends AbstractAuthenticationService
{
    /**
     * User used when authenticating the page indexer for protected pages,
     * to allow the indexer to access and protected content. May also allow to
     * identify requests by the page indexer.
     *
     * @var string
     */
    const SOLR_INDEXER_USERNAME = '__SolrIndexerUser__';

    /**
     * Gets a fake frontend user record to allow access to protected pages.
     *
     * @return array An array representing a frontend user.
     */
    public function getUser()
    {
        return array(
            'uid' => 0,
            'username' => self::SOLR_INDEXER_USERNAME,
            'authenticated' => true
        );
    }

    /**
     * Authenticates the page indexer frontend user to grant it access to
     * protected pages and page content.
     *
     * Returns 200 which automatically grants access for the current fake page
     * indexer user. A status of >= 200 also tells TYPO3 that it doesn't need to
     * conduct other services that might be registered for "their opinion"
     * whether a user is authenticated.
     *
     * @see \TYPO3\CMS\Core\Authentication\AbstractUserAuthentication::checkAuthentication()
     * @param array $user Array of user data
     * @return int Returns 200 to grant access for the page indexer.
     */
    public function authUser($user)
    {
        // shouldn't happen, but in case we get a regular user we just
        // pass it on to another (regular) auth service
        $authenticationLevel = 100;

        // strict comparison: both operands are strings, avoids loose-typing surprises
        if ($user['username'] === self::SOLR_INDEXER_USERNAME) {
            $authenticationLevel = 200;
        }

        return $authenticationLevel;
    }

    /**
     * Creates user group records so that the page indexer is granted access to
     * protected pages.
     *
     * @param array $user Data of user.
     * @param array $knownGroups Group data array of already known groups. This is handy if you want select other related groups. Keys in this array are unique IDs of those groups.
     * @return mixed Groups array, keys = uid which must be unique
     */
    public function getGroups($user, $knownGroups)
    {
        $groupData = array();

        $requestHandler = GeneralUtility::makeInstance('ApacheSolrForTypo3\\Solr\\IndexQueue\\PageIndexerRequestHandler');
        $accessRootline = $requestHandler->getRequest()->getParameter('accessRootline');

        // only fake groups for the indexer user, and only when an access
        // rootline was handed over with the indexing request
        if ($user['username'] === self::SOLR_INDEXER_USERNAME && !empty($accessRootline)) {
            $accessRootline = GeneralUtility::makeInstance(
                'ApacheSolrForTypo3\\Solr\\Access\\Rootline',
                $accessRootline
            );

            $groups = $accessRootline->getGroups();

            foreach ($groups as $groupId) {
                // faking a user group record
                $groupData[] = array(
                    'uid' => $groupId,
                    'pid' => 0,
                    'title' => '__SolrIndexerGroup__',
                    'TSconfig' => ''
                );
            }
        }

        return $groupData;
    }
}
| 1 | 5,913 | same here; `@param array $user Array of user data` | TYPO3-Solr-ext-solr | php |
@@ -21,7 +21,7 @@ func TestString(t *testing.T) {
b.Append("a")
}
},
- sz: 256,
+ sz: 0,
want: []interface{}{
"a", "a", "a", "a", "a",
"a", "a", "a", "a", "a", | 1 | package array_test
import (
"testing"
"github.com/apache/arrow/go/arrow/memory"
"github.com/influxdata/flux/array"
)
// TestString exercises array.StringBuilder / String array construction for
// three data patterns: a constant column, a run-length-like column, and a
// mixed column containing a null slot.
func TestString(t *testing.T) {
	for _, tc := range []struct {
		name  string
		build func(b *array.StringBuilder)
		sz    int // expected bytes still held by the allocator after NewStringArray; NOTE(review): depends on arrow's internal buffer sizing — confirm on arrow upgrades
		want  []interface{} // expected values per slot; nil marks a null slot
	}{
		{
			name: "Constant",
			build: func(b *array.StringBuilder) {
				for i := 0; i < 10; i++ {
					b.Append("a")
				}
			},
			sz: 256,
			want: []interface{}{
				"a", "a", "a", "a", "a",
				"a", "a", "a", "a", "a",
			},
		},
		{
			name: "RLE",
			build: func(b *array.StringBuilder) {
				for i := 0; i < 5; i++ {
					b.Append("a")
				}
				for i := 0; i < 5; i++ {
					b.Append("b")
				}
			},
			sz: 256,
			want: []interface{}{
				"a", "a", "a", "a", "a",
				"b", "b", "b", "b", "b",
			},
		},
		{
			name: "Random",
			build: func(b *array.StringBuilder) {
				for _, v := range []string{"a", "b", "c", "d", "e"} {
					b.Append(v)
				}
				b.AppendNull()
				for _, v := range []string{"g", "h", "i", "j"} {
					b.Append(v)
				}
			},
			sz: 256,
			want: []interface{}{
				"a", "b", "c", "d", "e",
				nil, "g", "h", "i", "j",
			},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			// Checked allocator verifies every buffer is released by the
			// time the deferred AssertSize(t, 0) runs.
			mem := memory.NewCheckedAllocator(memory.DefaultAllocator)
			defer mem.AssertSize(t, 0)
			b := array.NewStringBuilder(mem)
			tc.build(b)
			arr := b.NewStringArray()
			defer arr.Release()
			mem.AssertSize(t, tc.sz)
			if want, got := len(tc.want), arr.Len(); want != got {
				t.Fatalf("unexpected length -want/+got:\n\t- %d\n\t+ %d", want, got)
			}
			for i, sz := 0, arr.Len(); i < sz; i++ {
				// IsValid and IsNull must be exact complements for every slot.
				if arr.IsValid(i) == arr.IsNull(i) {
					t.Errorf("valid and null checks are not consistent for index %d", i)
				}
				if tc.want[i] == nil {
					if arr.IsValid(i) {
						t.Errorf("unexpected value -want/+got:\n\t- %v\n\t+ %v", tc.want[i], arr.Value(i))
					}
				} else if arr.IsNull(i) {
					t.Errorf("unexpected value -want/+got:\n\t- %v\n\t+ %v", tc.want[i], nil)
				} else {
					want, got := tc.want[i].(string), arr.Value(i)
					if want != got {
						t.Errorf("unexpected value -want/+got:\n\t- %v\n\t+ %v", want, got)
					}
				}
			}
		})
	}
}
// TestSlice verifies array.Slice over every supported array type: it builds
// a 10-element array, slices [i, j), and checks the slice's length and
// per-slot values (nil marks an expected null). The source array is
// released immediately after slicing, so the test also proves the slice
// keeps its backing buffers alive on its own.
func TestSlice(t *testing.T) {
	for _, tc := range []struct {
		name  string
		build func(mem memory.Allocator) array.Interface
		i, j  int // half-open slice bounds [i, j)
		want  []interface{}
	}{
		{
			name: "Int",
			build: func(mem memory.Allocator) array.Interface {
				b := array.NewIntBuilder(mem)
				for i := 0; i < 10; i++ {
					if i == 6 {
						b.AppendNull()
						continue
					}
					b.Append(int64(i))
				}
				return b.NewArray()
			},
			i: 5,
			j: 10,
			want: []interface{}{
				int64(5), nil, int64(7), int64(8), int64(9),
			},
		},
		{
			name: "Uint",
			build: func(mem memory.Allocator) array.Interface {
				b := array.NewUintBuilder(mem)
				for i := 0; i < 10; i++ {
					if i == 6 {
						b.AppendNull()
						continue
					}
					b.Append(uint64(i))
				}
				return b.NewArray()
			},
			i: 5,
			j: 10,
			want: []interface{}{
				uint64(5), nil, uint64(7), uint64(8), uint64(9),
			},
		},
		{
			name: "Float",
			build: func(mem memory.Allocator) array.Interface {
				b := array.NewFloatBuilder(mem)
				for i := 0; i < 10; i++ {
					if i == 6 {
						b.AppendNull()
						continue
					}
					b.Append(float64(i))
				}
				return b.NewArray()
			},
			i: 5,
			j: 10,
			want: []interface{}{
				float64(5), nil, float64(7), float64(8), float64(9),
			},
		},
		{
			name: "String_Constant",
			build: func(mem memory.Allocator) array.Interface {
				b := array.NewStringBuilder(mem)
				for i := 0; i < 10; i++ {
					b.Append("a")
				}
				return b.NewArray()
			},
			i: 5,
			j: 10,
			want: []interface{}{
				"a", "a", "a", "a", "a",
			},
		},
		{
			name: "String_RLE",
			build: func(mem memory.Allocator) array.Interface {
				b := array.NewStringBuilder(mem)
				for i := 0; i < 5; i++ {
					b.Append("a")
				}
				for i := 0; i < 5; i++ {
					b.Append("b")
				}
				return b.NewArray()
			},
			i: 5,
			j: 10,
			want: []interface{}{
				"b", "b", "b", "b", "b",
			},
		},
		{
			name: "String_Random",
			build: func(mem memory.Allocator) array.Interface {
				b := array.NewStringBuilder(mem)
				for _, v := range []string{"a", "b", "c", "d", "e"} {
					b.Append(v)
				}
				b.AppendNull()
				for _, v := range []string{"g", "h", "i", "j"} {
					b.Append(v)
				}
				return b.NewArray()
			},
			i: 5,
			j: 10,
			want: []interface{}{
				nil, "g", "h", "i", "j",
			},
		},
		{
			name: "Boolean",
			build: func(mem memory.Allocator) array.Interface {
				b := array.NewBooleanBuilder(mem)
				for i := 0; i < 10; i++ {
					if i == 6 {
						b.AppendNull()
						continue
					}
					b.Append(i%2 == 0)
				}
				return b.NewArray()
			},
			i: 5,
			j: 10,
			want: []interface{}{
				false, nil, false, true, false,
			},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			// Checked allocator proves slice + release bookkeeping is
			// balanced by the deferred AssertSize(t, 0).
			mem := memory.NewCheckedAllocator(memory.DefaultAllocator)
			defer mem.AssertSize(t, 0)
			arr := tc.build(mem)
			slice := array.Slice(arr, tc.i, tc.j)
			// releasing the source here: the slice must retain the buffers
			arr.Release()
			defer slice.Release()
			if want, got := len(tc.want), slice.Len(); want != got {
				t.Fatalf("unexpected length -want/+got:\n\t- %d\n\t+ %d", want, got)
			}
			for i, sz := 0, slice.Len(); i < sz; i++ {
				want, got := tc.want[i], getValue(slice, i)
				if want != got {
					t.Errorf("unexpected value -want/+got:\n\t- %v\n\t+ %v", want, got)
				}
			}
		})
	}
}
// getValue returns the boxed value stored at index i of arr, or nil when
// the slot is null. It panics for array types it does not know about.
func getValue(arr array.Interface, i int) interface{} {
	if arr.IsNull(i) {
		return nil
	}
	switch typed := arr.(type) {
	case *array.Boolean:
		return typed.Value(i)
	case *array.Float:
		return typed.Value(i)
	case *array.Int:
		return typed.Value(i)
	case *array.String:
		return typed.Value(i)
	case *array.Uint:
		return typed.Value(i)
	default:
		panic("unimplemented")
	}
}
| 1 | 16,476 | A quick read of this test indicates that we're changing the behavior of this api, and so the test needs updating. Is that right? | influxdata-flux | go |
@@ -5,13 +5,13 @@
using System;
using Android.Content;
using Android.Runtime;
-using Android.Support.V7.Widget;
+using AndroidX.RecyclerView.Widget;
using Java.Lang;
using MvvmCross.Logging;
-namespace MvvmCross.Droid.Support.V7.RecyclerView
+namespace MvvmCross.DroidX.RecyclerView
{
- [Register("mvvmcross.droid.support.v7.recyclerview.MvxGuardedLinearLayoutManager")]
+ [Register("mvvmcross.DroidX.recyclerview.MvxGuardedLinearLayoutManager")]
public class MvxGuardedLinearLayoutManager : LinearLayoutManager
{
public MvxGuardedLinearLayoutManager(Context context) : base(context) | 1 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MS-PL license.
// See the LICENSE file in the project root for more information.
using System;
using Android.Content;
using Android.Runtime;
using Android.Support.V7.Widget;
using Java.Lang;
using MvvmCross.Logging;
namespace MvvmCross.Droid.Support.V7.RecyclerView
{
/// <summary>
/// A <c>LinearLayoutManager</c> that guards against the known RecyclerView
/// "Inconsistency detected. Invalid item position" crash by disabling
/// predictive item animations and, as a belt-and-braces measure, logging
/// instead of crashing when layout throws <c>IndexOutOfBoundsException</c>.
/// </summary>
[Register("mvvmcross.droid.support.v7.recyclerview.MvxGuardedLinearLayoutManager")]
public class MvxGuardedLinearLayoutManager : LinearLayoutManager
{
    /// <summary>Creates the layout manager for the given Android context.</summary>
    public MvxGuardedLinearLayoutManager(Context context) : base(context)
    {
    }

    /// <summary>
    /// JNI constructor invoked when the Android runtime re-creates the
    /// managed peer from a native handle; must not be removed.
    /// </summary>
    [Android.Runtime.Preserve(Conditional = true)]
    protected MvxGuardedLinearLayoutManager(IntPtr ptr, JniHandleOwnership transfer) : base(ptr, transfer)
    {
    }

    /// <summary>
    /// Fix issue like https://code.google.com/p/android/issues/detail?id=77846#c1 but may not be exactly the same.
    /// https://stackoverflow.com/questions/30220771/recyclerview-inconsistency-detected-invalid-item-position?page=1&tab=active#tab-top
    /// </summary>
    public override bool SupportsPredictiveItemAnimations() => false;

    /// <summary>
    /// This should not be needed anymore, as it should be caused by SupportsPredictiveItemAnimations
    /// </summary>
    public override void OnLayoutChildren(Android.Support.V7.Widget.RecyclerView.Recycler recycler,
        Android.Support.V7.Widget.RecyclerView.State state)
    {
        try
        {
            base.OnLayoutChildren(recycler, state);
        }
        catch (IndexOutOfBoundsException e)
        {
            // Swallow and log: crashing the whole activity for a transient
            // adapter/layout inconsistency is worse than skipping one layout pass.
            MvxAndroidLog.Instance.Warn(
                "Workaround of issue - https://code.google.com/p/android/issues/detail?id=77846#c1 - IndexOutOfBoundsException " +
                e.Message);
        }
    }
}
}
| 1 | 15,185 | Register needs lowercasing | MvvmCross-MvvmCross | .cs |
@@ -134,6 +134,9 @@ public class Constants {
public static final String EXECUTION_SOURCE_SCHEDULED = "schedule";
public static final String EXECUTION_SOURCE_EVENT = "event";
+ // Should validate proxy user
+ public static final boolean DEFAULT_VALIDATE_PROXY_USER = false;
+
public static class ConfigurationKeys {
public static final String AZKABAN_CLUSTER_NAME = "azkaban.cluster.name"; | 1 | /*
* Copyright 2018 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban;
import java.time.Duration;
/**
* Constants used in configuration files or shared among classes.
*
* <p>Conventions:
*
* <p>Internal constants to be put in the {@link Constants} class
*
* <p>Configuration keys to be put in the {@link ConfigurationKeys} class
*
* <p>Flow level properties keys to be put in the {@link FlowProperties} class
*
* <p>Job level Properties keys to be put in the {@link JobProperties} class
*
* <p>Use '.' to separate name spaces and '_" to separate words in the same namespace. e.g.
* azkaban.job.some_key</p>
*/
public class Constants {
// Azkaban Flow Versions
public static final double DEFAULT_AZKABAN_FLOW_VERSION = 1.0;
public static final double AZKABAN_FLOW_VERSION_2_0 = 2.0;
// Flow 2.0 file suffix
public static final String PROJECT_FILE_SUFFIX = ".project";
public static final String FLOW_FILE_SUFFIX = ".flow";
// Flow 2.0 node type
public static final String NODE_TYPE = "type";
public static final String FLOW_NODE_TYPE = "flow";
// Flow 2.0 flow and job path delimiter
public static final String PATH_DELIMITER = ":";
// Job properties override suffix
public static final String JOB_OVERRIDE_SUFFIX = ".jor";
// Names and paths of various file names to configure Azkaban
public static final String AZKABAN_PROPERTIES_FILE = "azkaban.properties";
public static final String AZKABAN_PRIVATE_PROPERTIES_FILE = "azkaban.private.properties";
public static final String DEFAULT_CONF_PATH = "conf";
public static final String DEFAULT_EXECUTOR_PORT_FILE = "executor.port";
public static final String AZKABAN_SERVLET_CONTEXT_KEY = "azkaban_app";
// Internal username used to perform SLA action
public static final String AZKABAN_SLA_CHECKER_USERNAME = "azkaban_sla";
// Memory check retry interval when OOM in ms
public static final long MEMORY_CHECK_INTERVAL_MS = 1000 * 60 * 1;
// Max number of memory check retry
public static final int MEMORY_CHECK_RETRY_LIMIT = 720;
public static final int DEFAULT_PORT_NUMBER = 8081;
public static final int DEFAULT_SSL_PORT_NUMBER = 8443;
public static final int DEFAULT_JETTY_MAX_THREAD_COUNT = 20;
// Configures the form limits for the web application
public static final int MAX_FORM_CONTENT_SIZE = 10 * 1024 * 1024;
// One Schedule's default End Time: 01/01/2050, 00:00:00, UTC
public static final long DEFAULT_SCHEDULE_END_EPOCH_TIME = 2524608000000L;
// Default flow trigger max wait time
public static final Duration DEFAULT_FLOW_TRIGGER_MAX_WAIT_TIME = Duration.ofDays(10);
public static final Duration MIN_FLOW_TRIGGER_WAIT_TIME = Duration.ofMinutes(1);
public static final int DEFAULT_MIN_AGE_FOR_CLASSIFYING_A_FLOW_AGED_MINUTES = 20;
// The flow exec id for a flow trigger instance which hasn't started a flow yet
public static final int UNASSIGNED_EXEC_ID = -1;
// The flow exec id for a flow trigger instance unable to trigger a flow yet
public static final int FAILED_EXEC_ID = -2;
// Default locked flow error message
public static final String DEFAULT_LOCKED_FLOW_ERROR_MESSAGE =
"Flow %s in project %s is locked. This is either a repeatedly failing flow, or an ineffcient"
+ " flow. Please refer to the Dr. Elephant report for this flow for more information.";
// Default maximum number of concurrent runs for a single flow
public static final int DEFAULT_MAX_ONCURRENT_RUNS_ONEFLOW = 30;
// How often executors will poll new executions in Poll Dispatch model
public static final int DEFAULT_AZKABAN_POLLING_INTERVAL_MS = 1000;
// Executors can use cpu load calculated from this period to take/skip polling turns
public static final int DEFAULT_AZKABAN_POLLING_CRITERIA_CPU_LOAD_PERIOD_SEC = 60;
// Default value to feature enable setting. To be backward compatible, this value === FALSE
public static final boolean DEFAULT_AZKABAN_RAMP_ENABLED = false;
// Due to multiple AzkabanExec Server instance scenario, it will be required to persistent the ramp result into the DB.
// However, Frequent data persistence will sacrifice the performance with limited data accuracy.
// This setting value controls to push result into DB every N finished ramped workflows
public static final int DEFAULT_AZKABAN_RAMP_STATUS_PUSH_INTERVAL_MAX = 20;
// Due to multiple AzkabanExec Server instance, it will be required to persistent the ramp result into the DB.
// However, Frequent data persistence will sacrifice the performance with limited data accuracy.
// This setting value controls to pull result from DB every N new ramped workflows
public static final int DEFAULT_AZKABAN_RAMP_STATUS_PULL_INTERVAL_MAX = 50;
// Use Polling Service to sync the ramp status cross EXEC Server.
public static final boolean DEFAULT_AZKABAN_RAMP_STATUS_POOLING_ENABLED = false;
// How often executors will poll ramp status in Poll Dispatch model
public static final int DEFAULT_AZKABAN_RAMP_STATUS_POLLING_INTERVAL = 10;
// Username to be sent to UserManager when OAuth is in use, and real username is not available:
public static final String OAUTH_USERNAME_PLACEHOLDER = "<OAuth>";
// Used by UserManager for password validation (to tell apart real passwords from auth codes).
// Empirically, passwords are shorter than this, and ACs are longer:
public static final int OAUTH_MIN_AUTHCODE_LENGTH = 80;
// Used (or should be used) wherever a string representation of UTF_8 charset is needed:
public static final String UTF_8 = java.nio.charset.StandardCharsets.UTF_8.toString();
// Specifies the source(adhoc, scheduled, event) from where flow execution is triggered
public static final String EXECUTION_SOURCE_ADHOC = "adhoc";
public static final String EXECUTION_SOURCE_SCHEDULED = "schedule";
public static final String EXECUTION_SOURCE_EVENT = "event";
public static class ConfigurationKeys {
public static final String AZKABAN_CLUSTER_NAME = "azkaban.cluster.name";
public static final String AZKABAN_GLOBAL_PROPERTIES_EXT_PATH = "executor.global.properties";
// Property to enable appropriate dispatch model
public static final String AZKABAN_EXECUTION_DISPATCH_METHOD = "azkaban.execution.dispatch.method";
// Configures Azkaban to use new polling model for dispatching
public static final String AZKABAN_POLLING_INTERVAL_MS = "azkaban.polling.interval.ms";
public static final String AZKABAN_POLLING_LOCK_ENABLED = "azkaban.polling.lock.enabled";
public static final String AZKABAN_POLLING_CRITERIA_FLOW_THREADS_AVAILABLE =
"azkaban.polling_criteria.flow_threads_available";
public static final String AZKABAN_POLLING_CRITERIA_MIN_FREE_MEMORY_GB =
"azkaban.polling_criteria.min_free_memory_gb";
public static final String AZKABAN_POLLING_CRITERIA_MAX_CPU_UTILIZATION_PCT =
"azkaban.polling_criteria.max_cpu_utilization_pct";
public static final String AZKABAN_POLLING_CRITERIA_CPU_LOAD_PERIOD_SEC =
"azkaban.polling_criteria.cpu_load_period_sec";
// Configures properties for Azkaban executor health check
public static final String AZKABAN_EXECUTOR_HEALTHCHECK_INTERVAL_MIN = "azkaban.executor.healthcheck.interval.min";
public static final String AZKABAN_EXECUTOR_MAX_FAILURE_COUNT = "azkaban.executor.max.failurecount";
public static final String AZKABAN_ADMIN_ALERT_EMAIL = "azkaban.admin.alert.email";
// Configures Azkaban Flow Version in project YAML file
public static final String AZKABAN_FLOW_VERSION = "azkaban-flow-version";
// These properties are configurable through azkaban.properties
public static final String AZKABAN_PID_FILENAME = "azkaban.pid.filename";
// Defines a list of external links, each referred to as a topic
public static final String AZKABAN_SERVER_EXTERNAL_TOPICS = "azkaban.server.external.topics";
// External URL template of a given topic, specified in the list defined above
public static final String AZKABAN_SERVER_EXTERNAL_TOPIC_URL = "azkaban.server.external.${topic}.url";
// Designates one of the external link topics to correspond to an execution analyzer
public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_TOPIC = "azkaban.server.external.analyzer.topic";
public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_LABEL = "azkaban.server.external.analyzer.label";
// Designates one of the external link topics to correspond to a job log viewer
public static final String AZKABAN_SERVER_EXTERNAL_LOGVIEWER_TOPIC = "azkaban.server.external.logviewer.topic";
public static final String AZKABAN_SERVER_EXTERNAL_LOGVIEWER_LABEL = "azkaban.server.external.logviewer.label";
/*
* Hadoop/Spark user job link.
* Example:
* a) azkaban.server.external.resource_manager_job_url=http://***rm***:8088/cluster/app/application_${application.id}
* b) azkaban.server.external.history_server_job_url=http://***jh***:19888/jobhistory/job/job_${application.id}
* c) azkaban.server.external.spark_history_server_job_url=http://***sh***:18080/history/application_${application.id}/1/jobs
* */
public static final String HADOOP_CLUSTER_URL = "azkaban.server.external.hadoop_cluster_url";
public static final String RESOURCE_MANAGER_JOB_URL = "azkaban.server.external.resource_manager_job_url";
public static final String HISTORY_SERVER_JOB_URL = "azkaban.server.external.history_server_job_url";
public static final String SPARK_HISTORY_SERVER_JOB_URL = "azkaban.server.external.spark_history_server_job_url";
// Configures the Kafka appender for logging user jobs, specified for the exec server
public static final String AZKABAN_SERVER_LOGGING_KAFKA_BROKERLIST = "azkaban.server.logging.kafka.brokerList";
public static final String AZKABAN_SERVER_LOGGING_KAFKA_TOPIC = "azkaban.server.logging.kafka.topic";
// Represent the class name of azkaban metrics reporter.
public static final String CUSTOM_METRICS_REPORTER_CLASS_NAME = "azkaban.metrics.reporter.name";
// Represent the metrics server URL.
public static final String METRICS_SERVER_URL = "azkaban.metrics.server.url";
public static final String IS_METRICS_ENABLED = "azkaban.is.metrics.enabled";
public static final String MIN_AGE_FOR_CLASSIFYING_A_FLOW_AGED_MINUTES = "azkaban.metrics"
+ ".min_age_for_classifying_a_flow_aged_minutes";
// User facing web server configurations used to construct the user facing server URLs. They are useful when there is a reverse proxy between Azkaban web servers and users.
// enduser -> myazkabanhost:443 -> proxy -> localhost:8081
// when this parameters set then these parameters are used to generate email links.
// if these parameters are not set then jetty.hostname, and jetty.port(if ssl configured jetty.ssl.port) are used.
public static final String AZKABAN_WEBSERVER_EXTERNAL_HOSTNAME = "azkaban.webserver.external_hostname";
public static final String AZKABAN_WEBSERVER_EXTERNAL_SSL_PORT = "azkaban.webserver.external_ssl_port";
public static final String AZKABAN_WEBSERVER_EXTERNAL_PORT = "azkaban.webserver.external_port";
// Hostname for the host, if not specified, canonical hostname will be used
public static final String AZKABAN_SERVER_HOST_NAME = "azkaban.server.hostname";
// List of users we prevent azkaban from running flows as. (ie: root, azkaban)
public static final String BLACK_LISTED_USERS = "azkaban.server.blacklist.users";
// Path name of execute-as-user executable
public static final String AZKABAN_SERVER_NATIVE_LIB_FOLDER = "azkaban.native.lib";
// Name of *nix group associated with the process running Azkaban
public static final String AZKABAN_SERVER_GROUP_NAME = "azkaban.group.name";
// Legacy configs section, new configs should follow the naming convention of azkaban.server.<rest of the name> for server configs.
// Jetty server configurations.
public static final String JETTY_HEADER_BUFFER_SIZE = "jetty.headerBufferSize";
public static final String JETTY_USE_SSL = "jetty.use.ssl";
public static final String JETTY_SSL_PORT = "jetty.ssl.port";
public static final String JETTY_PORT = "jetty.port";
public static final String EXECUTOR_PORT_FILE = "executor.portfile";
// To set a fixed port for executor-server. Otherwise some available port is used.
public static final String EXECUTOR_PORT = "executor.port";
public static final String DEFAULT_TIMEZONE_ID = "default.timezone.id";
// Boolean config set on the Web server to prevent users from creating projects. When set to
// true only admins or users with CREATEPROJECTS permission can create projects.
public static final String LOCKDOWN_CREATE_PROJECTS_KEY = "lockdown.create.projects";
// Boolean config set on the Web server to prevent users from uploading projects. When set to
// true only admins or users with UPLOADPROJECTS permission can upload projects.
public static final String LOCKDOWN_UPLOAD_PROJECTS_KEY = "lockdown.upload.projects";
// Max flow running time in mins, server will kill flows running longer than this setting.
// if not set or <= 0, then there's no restriction on running time.
public static final String AZKABAN_MAX_FLOW_RUNNING_MINS = "azkaban.server.flow.max.running.minutes";
// Maximum number of tries to download a dependency (no more retry attempts will be made after this many download failures)
public static final String AZKABAN_DEPENDENCY_MAX_DOWNLOAD_TRIES = "azkaban.dependency.max.download.tries";
public static final String AZKABAN_DEPENDENCY_DOWNLOAD_THREADPOOL_SIZE =
"azkaban.dependency.download.threadpool.size";
public static final String AZKABAN_STORAGE_TYPE = "azkaban.storage.type";
public static final String AZKABAN_STORAGE_LOCAL_BASEDIR = "azkaban.storage.local.basedir";
public static final String HADOOP_CONF_DIR_PATH = "hadoop.conf.dir.path";
// This really should be azkaban.storage.hdfs.project_root.uri
public static final String AZKABAN_STORAGE_HDFS_PROJECT_ROOT_URI = "azkaban.storage.hdfs.root.uri";
public static final String AZKABAN_STORAGE_CACHE_DEPENDENCY_ENABLED = "azkaban.storage.cache.dependency.enabled";
public static final String AZKABAN_STORAGE_CACHE_DEPENDENCY_ROOT_URI = "azkaban.storage.cache.dependency_root.uri";
public static final String AZKABAN_STORAGE_ORIGIN_DEPENDENCY_ROOT_URI = "azkaban.storage.origin.dependency_root.uri";
public static final String AZKABAN_KERBEROS_PRINCIPAL = "azkaban.kerberos.principal";
public static final String AZKABAN_KEYTAB_PATH = "azkaban.keytab.path";
public static final String PROJECT_TEMP_DIR = "project.temp.dir";
// Event reporting properties
public static final String AZKABAN_EVENT_REPORTING_CLASS_PARAM =
"azkaban.event.reporting.class";
public static final String AZKABAN_EVENT_REPORTING_ENABLED = "azkaban.event.reporting.enabled";
// Comma separated list of properties to propagate from flow to Event reporter metadata
public static final String AZKABAN_EVENT_REPORTING_PROPERTIES_TO_PROPAGATE = "azkaban.event.reporting.propagateProperties";
public static final String AZKABAN_EVENT_REPORTING_KAFKA_BROKERS =
"azkaban.event.reporting.kafka.brokers";
public static final String AZKABAN_EVENT_REPORTING_KAFKA_TOPIC =
"azkaban.event.reporting.kafka.topic";
public static final String AZKABAN_EVENT_REPORTING_KAFKA_SCHEMA_REGISTRY_URL =
"azkaban.event.reporting.kafka.schema.registry.url";
/*
* The max number of artifacts retained per project.
* Accepted Values:
* - 0 : Save all artifacts. No clean up is done on storage.
* - 1, 2, 3, ... (any +ve integer 'n') : Maintain 'n' latest versions in storage
*
* Note: Having an unacceptable value results in an exception and the service would REFUSE
* to start.
*
* Example:
* a) azkaban.storage.artifact.max.retention=all
* implies save all artifacts
* b) azkaban.storage.artifact.max.retention=3
* implies save latest 3 versions saved in storage.
**/
public static final String AZKABAN_STORAGE_ARTIFACT_MAX_RETENTION = "azkaban.storage.artifact.max.retention";
// enable quartz scheduler and flow trigger if true.
public static final String ENABLE_QUARTZ = "azkaban.server.schedule.enable_quartz";
public static final String CUSTOM_CREDENTIAL_NAME = "azkaban.security.credential";
public static final String OAUTH_CREDENTIAL_NAME = "azkaban.oauth.credential";
public static final String SECURITY_USER_GROUP = "azkaban.security.user.group";
public static final String CSR_KEYSTORE_LOCATION = "azkaban.csr.keystore.location";
// dir to keep dependency plugins
public static final String DEPENDENCY_PLUGIN_DIR = "azkaban.dependency.plugin.dir";
public static final String USE_MULTIPLE_EXECUTORS = "azkaban.use.multiple.executors";
public static final String MAX_CONCURRENT_RUNS_ONEFLOW = "azkaban.max.concurrent.runs.oneflow";
// list of whitelisted flows, with specific max number of concurrent runs. Format:
// <project 1>,<flow 1>,<number>;<project 2>,<flow 2>,<number>
public static final String CONCURRENT_RUNS_ONEFLOW_WHITELIST =
"azkaban.concurrent.runs.oneflow.whitelist";
public static final String WEBSERVER_QUEUE_SIZE = "azkaban.webserver.queue.size";
public static final String ACTIVE_EXECUTOR_REFRESH_IN_MS =
"azkaban.activeexecutor.refresh.milisecinterval";
public static final String ACTIVE_EXECUTOR_REFRESH_IN_NUM_FLOW =
"azkaban.activeexecutor.refresh.flowinterval";
public static final String EXECUTORINFO_REFRESH_MAX_THREADS =
"azkaban.executorinfo.refresh.maxThreads";
public static final String MAX_DISPATCHING_ERRORS_PERMITTED = "azkaban.maxDispatchingErrors";
public static final String EXECUTOR_SELECTOR_FILTERS = "azkaban.executorselector.filters";
public static final String EXECUTOR_SELECTOR_COMPARATOR_PREFIX =
"azkaban.executorselector.comparator.";
public static final String QUEUEPROCESSING_ENABLED = "azkaban.queueprocessing.enabled";
public static final String QUEUE_PROCESSOR_WAIT_IN_MS = "azkaban.queue.processor.wait.in.ms";
public static final String SESSION_TIME_TO_LIVE = "session.time.to.live";
// allowed max number of sessions per user per IP
public static final String MAX_SESSION_NUMBER_PER_IP_PER_USER = "azkaban.session"
+ ".max_number_per_ip_per_user";
// allowed max size of shared project dir (percentage of partition size), e.g 0.8
public static final String PROJECT_CACHE_SIZE_PERCENTAGE =
"azkaban.project_cache_size_percentage_of_disk";
public static final String PROJECT_CACHE_THROTTLE_PERCENTAGE =
"azkaban.project_cache_throttle_percentage";
// how many older versions of project files are kept in DB before deleting them
public static final String PROJECT_VERSION_RETENTION = "project.version.retention";
// number of rows to be displayed on the executions page.
public static final String DISPLAY_EXECUTION_PAGE_SIZE = "azkaban.display.execution_page_size";
// locked flow error message. Parameters passed in are the flow name and project name.
public static final String AZKABAN_LOCKED_FLOW_ERROR_MESSAGE =
"azkaban.locked.flow.error.message";
// flow ramp related setting keys
// Default value to feature enable setting. To be backward compatible, this value === FALSE
public static final String AZKABAN_RAMP_ENABLED = "azkaban.ramp.enabled";
// Due to multiple AzkabanExec Server instance scenario, it will be required to persistent the ramp result into the DB.
// However, Frequent data persistence will sacrifice the performance with limited data accuracy.
// This setting value controls to push result into DB every N finished ramped workflows
public static final String AZKABAN_RAMP_STATUS_PUSH_INTERVAL_MAX = "azkaban.ramp.status.push.interval.max";
// Due to multiple AzkabanExec Server instance, it will be required to persistent the ramp result into the DB.
// However, Frequent data persistence will sacrifice the performance with limited data accuracy.
// This setting value controls to pull result from DB every N new ramped workflows
public static final String AZKABAN_RAMP_STATUS_PULL_INTERVAL_MAX = "azkaban.ramp.status.pull.interval.max";
// A Polling Service can be applied to determine the ramp status synchronization interval.
public static final String AZKABAN_RAMP_STATUS_POLLING_ENABLED = "azkaban.ramp.status.polling.enabled";
public static final String AZKABAN_RAMP_STATUS_POLLING_INTERVAL = "azkaban.ramp.status.polling.interval";
public static final String AZKABAN_RAMP_STATUS_POLLING_CPU_MAX = "azkaban.ramp.status.polling.cpu.max";
public static final String AZKABAN_RAMP_STATUS_POLLING_MEMORY_MIN = "azkaban.ramp.status.polling.memory.min";
public static final String EXECUTION_LOGS_RETENTION_MS = "execution.logs.retention.ms";
public static final String EXECUTION_LOGS_CLEANUP_INTERVAL_SECONDS =
"execution.logs.cleanup.interval.seconds";
public static final String EXECUTION_LOGS_CLEANUP_RECORD_LIMIT =
"execution.logs.cleanup.record.limit";
// Oauth2.0 configuration keys. If missing, no OAuth will be attempted, and the old
// username/password{+2FA} prompt will be given for interactive login:
public static final String OAUTH_PROVIDER_URI_KEY = "oauth.provider_uri"; // where to send user for OAuth flow, e.g.:
// oauth.provider_uri=https://login.microsoftonline.com/tenant-id/oauth2/v2.0/authorize\
// ?client_id=client_id\
// &response_type=code\
// &scope=openid\
// &response_mode=form_post\
// &state={state}\
// &redirect_uri={redirect_uri}
// Strings {state} and {redirect_uri}, if present verbatim in the property value, will be
// substituted at runtime with (URL-encoded) navigation target and OAuth responce handler URIs,
// respectively. See handleOauth() in LoginAbstractServlet.java for details.
public static final String OAUTH_REDIRECT_URI_KEY = "oauth.redirect_uri"; // how OAuth calls us back, e.g.:
// oauth.redirect_uri=http://localhost:8081/?action=oauth_callback
}
  /**
   * Property names that the executor server injects into every running flow,
   * identifying the flow, its project, execution and submitter.
   */
  public static class FlowProperties {

    // Basic properties of flows as set by the executor server
    public static final String AZKABAN_FLOW_PROJECT_NAME = "azkaban.flow.projectname";
    public static final String AZKABAN_FLOW_FLOW_ID = "azkaban.flow.flowid";
    public static final String AZKABAN_FLOW_SUBMIT_USER = "azkaban.flow.submituser";
    public static final String AZKABAN_FLOW_EXEC_ID = "azkaban.flow.execid";
    public static final String AZKABAN_FLOW_PROJECT_VERSION = "azkaban.flow.projectversion";
  }
  /**
   * Property names read from individual job configurations (logging, Hive/HCat
   * token fetching, SSL/OAuth toggles, JVM memory limits and proxy user).
   */
  public static class JobProperties {

    // Job property that enables/disables using Kafka logging of user job logs
    public static final String AZKABAN_JOB_LOGGING_KAFKA_ENABLE = "azkaban.job.logging.kafka.enable";

    /*
     * this parameter is used to replace EXTRA_HCAT_LOCATION that could fail when one of the uris is not available.
     * EXTRA_HCAT_CLUSTERS has the following format:
     * other_hcat_clusters = "thrift://hcat1:port,thrift://hcat2:port;thrift://hcat3:port,thrift://hcat4:port"
     * Each string in the parenthesis is regarded as a "cluster", and we will get a delegation token from each cluster.
     * The uris(hcat servers) in a "cluster" ensures HA is provided.
     **/
    public static final String EXTRA_HCAT_CLUSTERS = "azkaban.job.hive.other_hcat_clusters";

    /*
     * the settings to be defined by user indicating if there are hcat locations other than the
     * default one the system should pre-fetch hcat token from. Note: Multiple thrift uris are
     * supported, use comma to separate the values, values are case insensitive.
     **/
    // Use EXTRA_HCAT_CLUSTERS instead
    @Deprecated
    public static final String EXTRA_HCAT_LOCATION = "other_hcat_location";

    // If true, AZ will fetches the jobs' certificate from remote Certificate Authority.
    public static final String ENABLE_JOB_SSL = "azkaban.job.enable.ssl";

    // If true, AZ will fetch OAuth token from credential provider
    public static final String ENABLE_OAUTH = "azkaban.enable.oauth";

    // Job properties that indicate maximum memory size
    public static final String JOB_MAX_XMS = "job.max.Xms";
    public static final String MAX_XMS_DEFAULT = "1G";
    public static final String JOB_MAX_XMX = "job.max.Xmx";
    public static final String MAX_XMX_DEFAULT = "2G";

    // The hadoop user the job should run under. If not specified, it will default to submit user.
    public static final String USER_TO_PROXY = "user.to.proxy";

    /**
     * Format string for Log4j's EnhancedPatternLayout
     */
    public static final String JOB_LOG_LAYOUT = "azkaban.job.log.layout";
  }
  /**
   * Timeout and thread-pool property names for the HTTP job-callback feature.
   * All timeout values are read as milliseconds by the callback requester.
   */
  public static class JobCallbackProperties {

    public static final String JOBCALLBACK_CONNECTION_REQUEST_TIMEOUT = "jobcallback.connection.request.timeout";
    public static final String JOBCALLBACK_CONNECTION_TIMEOUT = "jobcallback.connection.timeout";
    public static final String JOBCALLBACK_SOCKET_TIMEOUT = "jobcallback.socket.timeout";
    public static final String JOBCALLBACK_RESPONSE_WAIT_TIMEOUT = "jobcallback.response.wait.timeout";
    public static final String JOBCALLBACK_THREAD_POOL_SIZE = "jobcallback.thread.pool.size";
  }
  /**
   * Keys used inside a flow-trigger definition (schedule type/value, dependency
   * name) and the runtime properties handed to each trigger dependency instance.
   */
  public static class FlowTriggerProps {

    // Flow trigger props
    public static final String SCHEDULE_TYPE = "type";
    public static final String CRON_SCHEDULE_TYPE = "cron";
    public static final String SCHEDULE_VALUE = "value";
    public static final String DEP_NAME = "name";

    // Flow trigger dependency run time props
    public static final String START_TIME = "startTime";
    public static final String TRIGGER_INSTANCE_ID = "triggerInstanceId";
  }
  /**
   * Default directories and well-known properties file names used when loading
   * jobtype and ramp-policy plugins.
   */
  public static class PluginManager {

    public static final String JOBTYPE_DEFAULTDIR = "plugins/jobtypes";
    public static final String RAMPPOLICY_DEFAULTDIR = "plugins/ramppolicies";

    // need jars.to.include property, will be loaded with user property
    public static final String CONFFILE = "plugin.properties";
    // not exposed to users
    public static final String SYSCONFFILE = "private.properties";
    // common properties for multiple plugins
    public static final String COMMONCONFFILE = "common.properties";
    // common private properties for multiple plugins
    public static final String COMMONSYSCONFFILE = "commonprivate.properties";
  }
public static class ContainerizedDispatchManagerProperties {
public static final String AZKABAN_CONTAINERIZED_PREFIX = "azkaban.containerized.";
public static final String CONTAINERIZED_IMPL_TYPE = AZKABAN_CONTAINERIZED_PREFIX + "impl.type";
public static final String CONTAINERIZED_EXECUTION_BATCH_ENABLED =
AZKABAN_CONTAINERIZED_PREFIX + "execution.batch.enabled";
public static final String CONTAINERIZED_EXECUTION_BATCH_SIZE = AZKABAN_CONTAINERIZED_PREFIX +
"execution.batch.size";
public static final String CONTAINERIZED_EXECUTION_PROCESSING_THREAD_POOL_SIZE =
AZKABAN_CONTAINERIZED_PREFIX + "execution.processing.thread.pool.size";
public static final String CONTAINERIZED_CREATION_RATE_LIMIT =
AZKABAN_CONTAINERIZED_PREFIX + "creation.rate.limit";
// Kubernetes related properties
public static final String AZKABAN_KUBERNETES_PREFIX = "azkaban.kubernetes.";
public static final String KUBERNETES_NAMESPACE = AZKABAN_KUBERNETES_PREFIX + "namespace";
public static final String KUBERNETES_KUBE_CONFIG_PATH = AZKABAN_KUBERNETES_PREFIX +
"kube.config.path";
// Kubernetes pod related properties
public static final String KUBERNETES_POD_PREFIX = AZKABAN_KUBERNETES_PREFIX + "pod.";
public static final String KUBERNETES_POD_NAME_PREFIX = KUBERNETES_POD_PREFIX + "name.prefix";
// Kubernetes flow container related properties
public static final String KUBERNETES_FLOW_CONTAINER_PREFIX = AZKABAN_KUBERNETES_PREFIX +
"flow.container.";
public static final String KUBERNETES_FLOW_CONTAINER_NAME =
KUBERNETES_FLOW_CONTAINER_PREFIX + ".name";
public static final String KUBERNETES_FLOW_CONTAINER_CPU_LIMIT =
KUBERNETES_FLOW_CONTAINER_PREFIX +
"cpu.limit";
public static final String KUBERNETES_FLOW_CONTAINER_CPU_REQUEST =
KUBERNETES_FLOW_CONTAINER_PREFIX +
"cpu.request";
public static final String KUBERNETES_FLOW_CONTAINER_MEMORY_LIMIT =
KUBERNETES_FLOW_CONTAINER_PREFIX +
"memory.limit";
public static final String KUBERNETES_FLOW_CONTAINER_MEMORY_REQUEST =
KUBERNETES_FLOW_CONTAINER_PREFIX + "memory.request";
// Kubernetes service related properties
public static final String KUBERNETES_SERVICE_PREFIX = AZKABAN_KUBERNETES_PREFIX + "service.";
public static final String KUBERNETES_SERVICE_REQUIRED = KUBERNETES_SERVICE_PREFIX +
"required";
public static final String KUBERNETES_SERVICE_NAME_PREFIX = KUBERNETES_SERVICE_PREFIX +
"name.prefix";
public static final String KUBERNETES_SERVICE_PORT = KUBERNETES_SERVICE_PREFIX + "port";
public static final String KUBERNETES_SERVICE_CREATION_TIMEOUT_MS = KUBERNETES_SERVICE_PREFIX +
"creation.timeout.ms";
// Periodicity of lookup and cleanup of stale executions.
public static final String CONTAINERIZED_STALE_EXECUTION_CLEANUP_INTERVAL_MIN =
AZKABAN_CONTAINERIZED_PREFIX + "stale.execution.cleanup.interval.min";
}
}
| 1 | 21,006 | This should belong to FlowContainer class. | azkaban-azkaban | java |
@@ -412,7 +412,10 @@ class NVDAHighlighter(providerBase.VisionEnhancementProvider):
window = self.window = self.customWindowClass(self)
self.timer = winUser.WinTimer(window.handle, 0, self._refreshInterval, None)
msg = MSG()
- while winUser.getMessage(byref(msg), None, 0, 0):
+ # Python 3.8 note, Change this to use an Assignment expression to catch a return value of -1.
+ # See the remarks section of
+ # https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-getmessage
+ while winUser.getMessage(byref(msg), None, 0, 0) > 0:
winUser.user32.TranslateMessage(byref(msg))
winUser.user32.DispatchMessageW(byref(msg))
if vision._isDebug(): | 1 | # visionEnhancementProviders/NVDAHighlighter.py
# A part of NonVisual Desktop Access (NVDA)
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
# Copyright (C) 2018-2019 NV Access Limited, Babbage B.V., Takuya Nishimoto
"""Default highlighter based on GDI Plus."""
from typing import Optional, Tuple
from autoSettingsUtils.autoSettings import SupportedSettingType
import vision
from vision.constants import Context
from vision.util import getContextRect
from vision.visionHandlerExtensionPoints import EventExtensionPoints
from vision import providerBase
from windowUtils import CustomWindow
import wx
import gui
import api
from ctypes import byref, WinError
from ctypes.wintypes import COLORREF, MSG
import winUser
from logHandler import log
from mouseHandler import getTotalWidthAndHeightAndMinimumPosition
from locationHelper import RectLTWH
from collections import namedtuple
import threading
import winGDI
import weakref
from colors import RGB
import core
import driverHandler
class HighlightStyle(
	namedtuple("HighlightStyle", ("color", "width", "style", "margin"))
):
	"""Represents the style of a highlight for a particular context.
	Immutable; instances are defined once at module level (e.g. DASH_BLUE).
	@ivar color: The color to use for the style
	@type color: L{RGB}
	@ivar width: The width of the lines to be drawn, in pixels.
		A higher width reduces the inner dimensions of the rectangle.
		Therefore, if you need to increase the outer dimensions of the rectangle,
		you need to increase the margin as well.
	@type width: int
	@ivar style: The style of the lines to be drawn;
		One of the C{winGDI.DashStyle*} enumeration constants.
	@type style: int
	@ivar margin: The number of pixels between the highlight's rectangle
		and the rectangle of the object to be highlighted.
		A higher margin stretches the highlight's rectangle.
		This value may also be negative.
	@type margin: int
	"""
# Base colors used by the highlight styles below.
BLUE = RGB(0x03, 0x36, 0xFF)
PINK = RGB(0xFF, 0x02, 0x66)
YELLOW = RGB(0xFF, 0xDE, 0x03)
# Predefined styles, one per highlight context (see NVDAHighlighter._ContextStyles).
DASH_BLUE = HighlightStyle(BLUE, 5, winGDI.DashStyleDash, 5)
SOLID_PINK = HighlightStyle(PINK, 5, winGDI.DashStyleSolid, 5)
SOLID_BLUE = HighlightStyle(BLUE, 5, winGDI.DashStyleSolid, 5)
SOLID_YELLOW = HighlightStyle(YELLOW, 2, winGDI.DashStyleSolid, 2)
class HighlightWindow(CustomWindow):
	"""Transparent, topmost, layered window on which the highlight rectangles are painted.
	Created and driven from the highlighter's background thread (see NVDAHighlighter._run).
	"""
	transparency = 0xff
	className = u"NVDAHighlighter"
	windowName = u"NVDA Highlighter Window"
	windowStyle = winUser.WS_POPUP | winUser.WS_DISABLED
	extendedWindowStyle = winUser.WS_EX_TOPMOST | winUser.WS_EX_LAYERED
	transparentColor = 0 # Black

	@classmethod
	def _get__wClass(cls):
		# Extend the base window class: redraw on resize, and paint the
		# background with the color keyed as fully transparent below.
		wClass = super()._wClass
		wClass.style = winUser.CS_HREDRAW | winUser.CS_VREDRAW
		wClass.hbrBackground = winGDI.gdi32.CreateSolidBrush(COLORREF(cls.transparentColor))
		return wClass

	def updateLocationForDisplays(self):
		"""Resizes and repositions the window to cover the combined geometry of all displays.
		Also called on WM_DISPLAYCHANGE (see windowProc).
		"""
		if vision._isDebug():
			log.debug("Updating NVDAHighlighter window location for displays")
		displays = [wx.Display(i).GetGeometry() for i in range(wx.Display.GetCount())]
		screenWidth, screenHeight, minPos = getTotalWidthAndHeightAndMinimumPosition(displays)
		# Hack: Windows has a "feature" that will stop desktop shortcut hotkeys from working
		# when a window is full screen.
		# Removing one line of pixels from the bottom of the screen will fix this.
		left = minPos.x
		top = minPos.y
		width = screenWidth
		height = screenHeight - 1
		self.location = RectLTWH(left, top, width, height)
		winUser.user32.ShowWindow(self.handle, winUser.SW_HIDE)
		if not winUser.user32.SetWindowPos(
			self.handle,
			winUser.HWND_TOPMOST,
			left, top, width, height,
			winUser.SWP_NOACTIVATE
		):
			raise WinError()
		# SW_SHOWNA: show without activating, so focus is never stolen.
		winUser.user32.ShowWindow(self.handle, winUser.SW_SHOWNA)

	def __init__(self, highlighter):
		"""
		@param highlighter: The provider that owns this window; kept as a weak
			reference so the window cannot keep the provider alive (see _paint).
		"""
		if vision._isDebug():
			log.debug("initializing NVDAHighlighter window")
		super(HighlightWindow, self).__init__(
			windowName=self.windowName,
			windowStyle=self.windowStyle,
			extendedWindowStyle=self.extendedWindowStyle,
			parent=gui.mainFrame.Handle
		)
		self.location = None
		self.highlighterRef = weakref.ref(highlighter)
		# Make transparentColor see-through and apply overall alpha.
		winUser.SetLayeredWindowAttributes(
			self.handle,
			self.transparentColor,
			self.transparency,
			winUser.LWA_ALPHA | winUser.LWA_COLORKEY)
		self.updateLocationForDisplays()
		if not winUser.user32.UpdateWindow(self.handle):
			raise WinError()

	def windowProc(self, hwnd, msg, wParam, lParam):
		"""Window procedure: paint, quit on destroy, refresh on timer, relayout on display change."""
		if msg == winUser.WM_PAINT:
			self._paint()
			# Ensure the window is top most
			winUser.user32.SetWindowPos(
				self.handle,
				winUser.HWND_TOPMOST,
				0, 0, 0, 0,
				winUser.SWP_NOACTIVATE | winUser.SWP_NOMOVE | winUser.SWP_NOSIZE
			)
		elif msg == winUser.WM_DESTROY:
			winUser.user32.PostQuitMessage(0)
		elif msg == winUser.WM_TIMER:
			self.refresh()
		elif msg == winUser.WM_DISPLAYCHANGE:
			# wx might not be aware of the display change at this point
			core.callLater(100, self.updateLocationForDisplays)

	def _paint(self):
		# Draw one styled rectangle for every enabled context that currently has
		# a position in the highlighter's contextToRectMap.
		highlighter = self.highlighterRef()
		if not highlighter:
			# The highlighter instance died unexpectedly, kill the window as well
			winUser.user32.PostQuitMessage(0)
			return
		contextRects = {}
		for context in highlighter.enabledContexts:
			rect = highlighter.contextToRectMap.get(context)
			if not rect:
				continue
			elif context == Context.NAVIGATOR and contextRects.get(Context.FOCUS) == rect:
				# When the focus overlaps the navigator object, which is usually the case,
				# show a different highlight style.
				# Focus is in contextRects, do not show the standalone focus highlight.
				contextRects.pop(Context.FOCUS)
				# Navigator object might be in contextRects as well
				contextRects.pop(Context.NAVIGATOR, None)
				context = Context.FOCUS_NAVIGATOR
			contextRects[context] = rect
		if not contextRects:
			return
		with winUser.paint(self.handle) as hdc:
			with winGDI.GDIPlusGraphicsContext(hdc) as graphicsContext:
				for context, rect in contextRects.items():
					HighlightStyle = highlighter._ContextStyles[context]
					# Before calculating logical coordinates,
					# make sure the rectangle falls within the highlighter window
					rect = rect.intersection(self.location)
					try:
						rect = rect.toLogical(self.handle)
					except RuntimeError:
						log.debugWarning("", exc_info=True)
					rect = rect.toClient(self.handle)
					try:
						rect = rect.expandOrShrink(HighlightStyle.margin)
					except RuntimeError:
						pass
					with winGDI.GDIPlusPen(
						HighlightStyle.color.toGDIPlusARGB(),
						HighlightStyle.width,
						HighlightStyle.style
					) as pen:
						winGDI.gdiPlusDrawRectangle(graphicsContext, pen, *rect.toLTWH())

	def refresh(self):
		# Invalidate the whole window so the next WM_PAINT redraws all highlights.
		winUser.user32.InvalidateRect(self.handle, None, True)
# Maps each supported context to the translatable checkbox label (including
# its keyboard accelerator) shown in the highlighter's settings panel.
_contextOptionLabelsWithAccelerators = {
	# Translators: shown for a highlighter setting that toggles
	# highlighting the system focus.
	Context.FOCUS: _("Highlight system fo&cus"),
	# Translators: shown for a highlighter setting that toggles
	# highlighting the browse mode cursor.
	Context.BROWSEMODE: _("Highlight browse &mode cursor"),
	# Translators: shown for a highlighter setting that toggles
	# highlighting the navigator object.
	Context.NAVIGATOR: _("Highlight navigator &object"),
}
# Contexts this highlighter can draw; iteration order determines the order of
# the generated settings (see NVDAHighlighterSettings._get_supportedSettings).
_supportedContexts = (Context.FOCUS, Context.NAVIGATOR, Context.BROWSEMODE)
class NVDAHighlighterSettings(providerBase.VisionEnhancementProviderSettings):
	"""Persistent settings for the built-in highlighter.

	One boolean attribute exists per supported context, named
	"highlight" followed by the capitalized context name.
	"""
	# Every highlight type starts disabled until the user opts in.
	highlightFocus = False
	highlightNavigator = False
	highlightBrowseMode = False

	@classmethod
	def getId(cls) -> str:
		return "NVDAHighlighter"

	@classmethod
	def getDisplayName(cls) -> str:
		# Translators: Description for NVDA's built-in screen highlighter.
		return _("Focus Highlight")

	def _get_supportedSettings(self) -> SupportedSettingType:
		# Build one boolean setting per supported context; the setting id must
		# match the attribute names declared on this class.
		settings = []
		for context in _supportedContexts:
			settingId = "highlight" + context[0].upper() + context[1:]
			settings.append(
				driverHandler.BooleanDriverSetting(
					settingId,
					_contextOptionLabelsWithAccelerators[context],
					defaultVal=True
				)
			)
		return settings
class NVDAHighlighterGuiPanel(
	gui.AutoSettingsMixin,
	gui.SettingsPanel
):
	"""Settings panel for the built-in highlighter.
	Shows a tri-state "Enable Highlighting" checkbox plus one checkbox per
	context, and starts/stops the provider to match the chosen settings.
	"""
	_enableCheckSizer: wx.BoxSizer
	_enabledCheckbox: wx.CheckBox

	from gui.settingsDialogs import VisionProviderStateControl

	def __init__(
		self,
		parent: wx.Window,
		providerControl: VisionProviderStateControl
	):
		"""
		@param parent: The parent window for this panel.
		@param providerControl: Used to query, start and stop the provider instance.
		"""
		self._providerControl = providerControl
		initiallyEnabledInConfig = providerControl._providerInfo.providerClass.isEnabledInConfig()
		if not initiallyEnabledInConfig:
			# Provider disabled but individual options enabled is an inconsistent
			# state; reset the options so config and provider state agree.
			settingsStorage = self._getSettingsStorage()
			settingsToCheck = [
				settingsStorage.highlightBrowseMode,
				settingsStorage.highlightFocus,
				settingsStorage.highlightNavigator,
			]
			if any(settingsToCheck):
				log.debugWarning(
					"Highlighter disabled in config while some of its settings are enabled. "
					"This will be corrected"
				)
				settingsStorage.highlightBrowseMode = False
				settingsStorage.highlightFocus = False
				settingsStorage.highlightNavigator = False
		super().__init__(parent)

	def _buildGui(self):
		# Lay out: enable checkbox, spacer, "Options:" text, then the
		# auto-generated per-context checkboxes.
		self.mainSizer = wx.BoxSizer(wx.VERTICAL)
		self._enabledCheckbox = wx.CheckBox(
			self,
			# Translators: The label for a checkbox that enables / disables focus highlighting
			# in the NVDA Highlighter vision settings panel.
			label=_("&Enable Highlighting"),
			style=wx.CHK_3STATE
		)
		self.mainSizer.Add(self._enabledCheckbox)
		self.mainSizer.AddSpacer(size=self.scaleSize(10))
		# this options separator is done with text rather than a group box because a groupbox is too verbose,
		# but visually some separation is helpful, since the rest of the options are really sub-settings.
		self.optionsText = wx.StaticText(
			self,
			# Translators: The label for a group box containing the NVDA highlighter options.
			label=_("Options:")
		)
		self.mainSizer.Add(self.optionsText)
		self.lastControl = self.optionsText
		self.settingsSizer = wx.BoxSizer(wx.VERTICAL)
		self.makeSettings(self.settingsSizer)
		self.mainSizer.Add(self.settingsSizer, border=self.scaleSize(15), flag=wx.LEFT | wx.EXPAND)
		self.mainSizer.Fit(self)
		self.SetSizer(self.mainSizer)

	def getSettings(self) -> NVDAHighlighterSettings:
		# AutoSettingsMixin uses the getSettings method (via getSettingsStorage) to get the instance which is
		# used to get / set attributes. The attributes must match the id's of the settings.
		# We want them set on our settings instance.
		return VisionEnhancementProvider.getSettings()

	def makeSettings(self, sizer: wx.BoxSizer):
		# Auto-generate the per-context checkboxes from the provider's settings.
		self.updateDriverSettings()
		# bind to all check box events
		self.Bind(wx.EVT_CHECKBOX, self._onCheckEvent)
		self._updateEnabledState()

	def onPanelActivated(self):
		self.lastControl = self.optionsText

	def _updateEnabledState(self):
		# Reflect the three option values into the tri-state enable checkbox,
		# and start/stop the provider accordingly.
		settings = self._getSettingsStorage()
		settingsToTriggerActivation = [
			settings.highlightBrowseMode,
			settings.highlightFocus,
			settings.highlightNavigator,
		]
		if any(settingsToTriggerActivation):
			if all(settingsToTriggerActivation):
				self._enabledCheckbox.Set3StateValue(wx.CHK_CHECKED)
			else:
				self._enabledCheckbox.Set3StateValue(wx.CHK_UNDETERMINED)
			self._ensureEnableState(True)
		else:
			self._enabledCheckbox.Set3StateValue(wx.CHK_UNCHECKED)
			self._ensureEnableState(False)

	def _ensureEnableState(self, shouldBeEnabled: bool):
		# Start or terminate the provider only when its state differs from the target.
		currentlyEnabled = bool(self._providerControl.getProviderInstance())
		if shouldBeEnabled and not currentlyEnabled:
			self._providerControl.startProvider()
		elif not shouldBeEnabled and currentlyEnabled:
			self._providerControl.terminateProvider()

	def _onCheckEvent(self, evt: wx.CommandEvent):
		# The master checkbox sets all three options at once; any other checkbox
		# just re-syncs the master state.
		settingsStorage = self._getSettingsStorage()
		if evt.GetEventObject() is self._enabledCheckbox:
			settingsStorage.highlightBrowseMode = evt.IsChecked()
			settingsStorage.highlightFocus = evt.IsChecked()
			settingsStorage.highlightNavigator = evt.IsChecked()
			self._ensureEnableState(evt.IsChecked())
			self.updateDriverSettings()
		else:
			self._updateEnabledState()
		providerInst: Optional[NVDAHighlighter] = self._providerControl.getProviderInstance()
		if providerInst:
			providerInst.refresh()
class NVDAHighlighter(providerBase.VisionEnhancementProvider):
	"""Built-in screen highlighter.

	Draws styled rectangles around the system focus, the navigator object and
	the browse mode cursor on a transparent topmost window (HighlightWindow)
	that runs its own Win32 message loop on a background thread.
	"""
	# Style used when drawing each highlight context.
	_ContextStyles = {
		Context.FOCUS: DASH_BLUE,
		Context.NAVIGATOR: SOLID_PINK,
		Context.FOCUS_NAVIGATOR: SOLID_BLUE,
		Context.BROWSEMODE: SOLID_YELLOW,
	}
	# Interval between highlight refreshes, in milliseconds.
	_refreshInterval = 100
	customWindowClass = HighlightWindow
	_settings = NVDAHighlighterSettings()

	enabledContexts: Tuple[Context]  # type info for autoprop: L{_get_enabledContexts}

	@classmethod  # override
	def getSettings(cls) -> NVDAHighlighterSettings:
		return cls._settings

	@classmethod  # override
	def getSettingsPanelClass(cls):
		"""Returns the class to be used in order to construct a settings panel for the provider.
		@return: Optional[SettingsPanel]
		@remarks: When None is returned, L{gui.settingsDialogs.VisionProviderSubPanel_Wrapper} is used.
		"""
		return NVDAHighlighterGuiPanel

	@classmethod  # override
	def canStart(cls) -> bool:
		return True

	def registerEventExtensionPoints(  # override
		self,
		extensionPoints: EventExtensionPoints
	) -> None:
		extensionPoints.post_focusChange.register(self.handleFocusChange)
		extensionPoints.post_reviewMove.register(self.handleReviewMove)
		extensionPoints.post_browseModeMove.register(self.handleBrowseModeMove)

	def __init__(self):
		"""Initializes GDI Plus and starts the daemon highlighter thread."""
		super().__init__()
		log.debug("Starting NVDAHighlighter")
		self.contextToRectMap = {}
		winGDI.gdiPlusInitialize()
		self.window = None
		self._highlighterThread = threading.Thread(
			name=f"{self.__class__.__module__}.{self.__class__.__qualname__}",
			target=self._run
		)
		self._highlighterThread.daemon = True
		self._highlighterThread.start()

	def terminate(self):
		"""Posts WM_QUIT to the highlighter thread, joins it, and tears down GDI Plus."""
		log.debug("Terminating NVDAHighlighter")
		if self._highlighterThread:
			if not winUser.user32.PostThreadMessageW(self._highlighterThread.ident, winUser.WM_QUIT, 0, 0):
				raise WinError()
			self._highlighterThread.join()
			self._highlighterThread = None
		winGDI.gdiPlusTerminate()
		self.contextToRectMap.clear()
		super().terminate()

	def _run(self):
		"""Thread target: creates the highlight window and pumps its message loop."""
		if vision._isDebug():
			log.debug("Starting NVDAHighlighter thread")
		window = self.window = self.customWindowClass(self)
		self.timer = winUser.WinTimer(window.handle, 0, self._refreshInterval, None)
		msg = MSG()
		# GetMessage returns 0 on WM_QUIT but -1 on error, and -1 is truthy; a
		# plain truthiness test would therefore loop forever on error. Only
		# continue for strictly positive return values. See the remarks section of
		# https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-getmessage
		while winUser.getMessage(byref(msg), None, 0, 0) > 0:
			winUser.user32.TranslateMessage(byref(msg))
			winUser.user32.DispatchMessageW(byref(msg))
		if vision._isDebug():
			log.debug("Quit message received on NVDAHighlighter thread")
		if self.timer:
			self.timer.terminate()
			self.timer = None
		if self.window:
			self.window.destroy()
			self.window = None

	def updateContextRect(self, context, rect=None, obj=None):
		"""Updates the position rectangle of the highlight for the specified context.
		If rect is specified, the method directly writes the rectangle to the contextToRectMap.
		Otherwise, it will call L{getContextRect}
		"""
		if context not in self.enabledContexts:
			return
		if rect is None:
			try:
				rect = getContextRect(context, obj=obj)
			except (LookupError, NotImplementedError, RuntimeError, TypeError):
				# No rectangle could be determined; record None so nothing is drawn.
				rect = None
		self.contextToRectMap[context] = rect

	def handleFocusChange(self, obj):
		self.updateContextRect(context=Context.FOCUS, obj=obj)
		if not api.isObjectInActiveTreeInterceptor(obj):
			# Focus left browse mode; drop any stale browse mode highlight.
			self.contextToRectMap.pop(Context.BROWSEMODE, None)
		else:
			self.handleBrowseModeMove()

	def handleReviewMove(self, context):
		self.updateContextRect(context=Context.NAVIGATOR)

	def handleBrowseModeMove(self, obj=None):
		self.updateContextRect(context=Context.BROWSEMODE)

	def refresh(self):
		"""Refreshes the screen positions of the enabled highlights.
		"""
		if self.window:
			self.window.refresh()

	def _get_enabledContexts(self):
		"""Gets the contexts for which the highlighter is enabled.
		"""
		return tuple(
			context for context in _supportedContexts
			if getattr(self.getSettings(), 'highlight%s' % (context[0].upper() + context[1:]))
		)
VisionEnhancementProvider = NVDAHighlighter
| 1 | 28,049 | I think there is a chance that on disable/enable cycling, referring to `self.window` is problematic. In the `__init__` method `self.window` is set to None. If `__init__` gets called before `_run` exits the `while` loop then we wont call `self.window.destroy()`. Can you use non-instance variables after this point? I don't think `self.timer` is used outside this function, if so, then I'd prefer it also be a local variable. | nvaccess-nvda | py |
@@ -216,6 +216,12 @@ func TestConfigCommandInteractiveCreateDocrootDenied(t *testing.T) {
// Set up tests and give ourselves a working directory.
assert := asrt.New(t)
+ noninteractive := "DRUD_NONINTERACTIVE"
+ // nolint: errcheck
+ defer os.Setenv(noninteractive, os.Getenv(noninteractive))
+ err := os.Unsetenv(noninteractive)
+ assert.NoError(err)
+
testMatrix := map[string][]string{
"drupal6phpversion": {AppTypeDrupal6, PHP56},
"drupal7phpversion": {AppTypeDrupal7, PHP71}, | 1 | package ddevapp_test
import (
"bufio"
"fmt"
"github.com/drud/ddev/pkg/exec"
"github.com/stretchr/testify/require"
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
. "github.com/drud/ddev/pkg/ddevapp"
"github.com/drud/ddev/pkg/fileutil"
"github.com/drud/ddev/pkg/testcommon"
"github.com/drud/ddev/pkg/util"
"github.com/drud/ddev/pkg/version"
"github.com/google/uuid"
asrt "github.com/stretchr/testify/assert"
)
// TestNewConfig tests functionality around creating a new config, writing it to disk, and reading the resulting config.
func TestNewConfig(t *testing.T) {
	assert := asrt.New(t)
	// Create a temporary directory and change to it for the duration of this test.
	// Note: defers run LIFO, so the working directory is restored before cleanup.
	testDir := testcommon.CreateTmpDir("TestNewConfig")
	defer testcommon.CleanupDir(testDir)
	defer testcommon.Chdir(testDir)()

	// Load a new Config
	app, err := NewApp(testDir, ProviderDefault)
	assert.NoError(err)

	// Ensure the config uses specified defaults.
	assert.Equal(app.APIVersion, version.DdevVersion)
	assert.Equal(app.DBImage, version.DBImg+":"+version.DBTag)
	assert.Equal(app.WebImage, version.WebImg+":"+version.WebTag)
	assert.Equal(app.DBAImage, version.DBAImg+":"+version.DBATag)
	app.Name = util.RandString(32)
	app.Type = AppTypeDrupal8

	// WriteConfig the app.
	err = app.WriteConfig()
	assert.NoError(err)
	_, err = os.Stat(app.ConfigPath)
	assert.NoError(err)

	// Reading the config back in should round-trip the values we just wrote.
	loadedConfig, err := NewApp(testDir, ProviderDefault)
	assert.NoError(err)
	assert.Equal(app.Name, loadedConfig.Name)
	assert.Equal(app.Type, loadedConfig.Type)
}
// TestAllowedAppTypes exercises IsValidAppType: every officially supported app
// type must be accepted, and random garbage strings must be rejected.
func TestAllowedAppTypes(t *testing.T) {
	assert := asrt.New(t)

	for _, appType := range GetValidAppTypes() {
		assert.True(IsValidAppType(appType))
	}

	// 50 random 32-character strings should never be valid app types.
	for i := 0; i < 50; i++ {
		assert.False(IsValidAppType(util.RandString(32)))
	}
}
// TestPrepDirectory ensures the configuration directory can be created with the correct permissions.
func TestPrepDirectory(t *testing.T) {
	assert := asrt.New(t)

	// Work inside a throwaway directory for the duration of the test.
	tmpDir := testcommon.CreateTmpDir("TestPrepDirectory")
	defer testcommon.CleanupDir(tmpDir)
	defer testcommon.Chdir(tmpDir)()

	app, err := NewApp(tmpDir, ProviderDefault)
	assert.NoError(err)

	// Prep the config directory, then confirm it exists on disk.
	configDir := filepath.Dir(app.ConfigPath)
	err = PrepDdevDirectory(configDir)
	assert.NoError(err)

	_, err = os.Stat(configDir)
	assert.NoError(err)
}
// TestHostName tests that the TestSite.Hostname() field returns the hostname as expected.
func TestHostName(t *testing.T) {
	assert := asrt.New(t)

	tmpDir := testcommon.CreateTmpDir("TestHostName")
	defer testcommon.CleanupDir(tmpDir)
	defer testcommon.Chdir(tmpDir)()

	app, err := NewApp(tmpDir, ProviderDefault)
	assert.NoError(err)
	app.Name = util.RandString(32)

	// The hostname is the app name followed by the ddev TLD.
	expected := app.Name + "." + version.DDevTLD
	assert.Equal(app.GetHostname(), expected)
}
// TestWriteDockerComposeYaml tests the writing of a docker-compose.yaml file.
func TestWriteDockerComposeYaml(t *testing.T) {
	// Set up tests and give ourselves a working directory.
	assert := asrt.New(t)
	testDir := testcommon.CreateTmpDir("TestWriteDockerCompose")
	defer testcommon.CleanupDir(testDir)
	defer testcommon.Chdir(testDir)()

	app, err := NewApp(testDir, ProviderDefault)
	assert.NoError(err)
	app.Name = util.RandString(32)
	app.Type = GetValidAppTypes()[0]

	// WriteConfig a config to create/prep necessary directories.
	err = app.WriteConfig()
	assert.NoError(err)

	// After the config has been written and directories exist, the write should work.
	err = app.WriteDockerComposeConfig()
	assert.NoError(err)

	// Ensure we can read from the file and that it's a regular file with the expected name.
	fileinfo, err := os.Stat(app.DockerComposeYAMLPath())
	assert.NoError(err)
	assert.False(fileinfo.IsDir())
	assert.Equal(fileinfo.Name(), filepath.Base(app.DockerComposeYAMLPath()))

	// The generated YAML should mention the app's platform and type.
	composeBytes, err := ioutil.ReadFile(app.DockerComposeYAMLPath())
	assert.NoError(err)
	contentString := string(composeBytes)
	assert.Contains(contentString, app.Platform)
	assert.Contains(contentString, app.Type)
}
// TestConfigCommand tests the interactive config options.
//
// For each app-type/PHP-version pair it feeds scripted answers to
// PromptForConfig via util.SetInputScanner and checks both the prompt output
// and the resulting app struct.
func TestConfigCommand(t *testing.T) {
	// Set up tests and give ourselves a working directory.
	assert := asrt.New(t)

	const apptypePos = 0
	const phpVersionPos = 1
	testMatrix := map[string][]string{
		"drupal6phpversion": {AppTypeDrupal6, PHP56},
		"drupal7phpversion": {AppTypeDrupal7, PHP71},
		"drupal8phpversion": {AppTypeDrupal8, PHP71},
	}

	for testName, testValues := range testMatrix {

		testDir := testcommon.CreateTmpDir("TestConfigCommand_" + testName)

		// testcommon.Chdir()() and CleanupDir() checks their own errors (and exit)
		defer testcommon.CleanupDir(testDir)
		defer testcommon.Chdir(testDir)()

		// Create a docroot folder. Directories need the execute bit (0755, not
		// 0644) or they cannot be traversed on POSIX systems.
		err := os.Mkdir(filepath.Join(testDir, "docroot"), 0755)
		if err != nil {
			// Without a docroot nothing below can succeed, so stop the test here.
			t.Fatalf("Could not create docroot directory under %s", testDir)
		}

		// Create the ddevapp we'll use for testing.
		// This will not return an error, since there is no existing configuration.
		app, err := NewApp(testDir, ProviderDefault)
		assert.NoError(err)

		// Randomize some values to use for Stdin during testing.
		name := strings.ToLower(util.RandString(16))
		invalidAppType := strings.ToLower(util.RandString(8))

		// Create an example input buffer that writes the sitename, a valid document root,
		// an invalid app type, and finally a valid app type (from test matrix)
		input := fmt.Sprintf("%s\ndocroot\n%s\n%s", name, invalidAppType, testValues[apptypePos])
		scanner := bufio.NewScanner(strings.NewReader(input))
		util.SetInputScanner(scanner)

		restoreOutput := util.CaptureUserOut()
		err = app.PromptForConfig()
		assert.NoError(err, t)
		out := restoreOutput()

		// Ensure we have expected vales in output.
		assert.Contains(out, testDir)
		assert.Contains(out, fmt.Sprintf("'%s' is not a valid project type", invalidAppType))

		// Create an example input buffer that writes an invalid projectname, then a valid-project-name,
		// a valid document root,
		// a valid app type
		input = fmt.Sprintf("invalid_project_name\n%s\ndocroot\n%s", name, testValues[apptypePos])
		scanner = bufio.NewScanner(strings.NewReader(input))
		util.SetInputScanner(scanner)

		restoreOutput = util.CaptureUserOut()
		err = app.PromptForConfig()
		assert.NoError(err, t)
		out = restoreOutput()

		// Ensure we have expected vales in output.
		assert.Contains(out, testDir)
		assert.Contains(out, "invalid_project_name is not a valid project name")

		// Ensure values were properly set on the app struct.
		assert.Equal(name, app.Name)
		assert.Equal(testValues[apptypePos], app.Type)
		assert.Equal("docroot", app.Docroot)
		assert.EqualValues(testValues[phpVersionPos], app.PHPVersion)

		err = PrepDdevDirectory(testDir)
		assert.NoError(err)
	}
}
// TestConfigCommandInteractiveCreateDocrootDenied checks that refusing to
// create a missing docroot aborts interactive configuration with an error.
func TestConfigCommandInteractiveCreateDocrootDenied(t *testing.T) {
	assert := asrt.New(t)

	testMatrix := map[string][]string{
		"drupal6phpversion": {AppTypeDrupal6, PHP56},
		"drupal7phpversion": {AppTypeDrupal7, PHP71},
		"drupal8phpversion": {AppTypeDrupal8, PHP71},
	}

	for testName := range testMatrix {
		tmpDir := testcommon.CreateTmpDir(t.Name() + testName)

		// testcommon.Chdir()() and CleanupDir() check their own errors (and exit).
		defer testcommon.CleanupDir(tmpDir)
		defer testcommon.Chdir(tmpDir)()

		// No configuration exists yet, so this cannot fail.
		app, err := NewApp(tmpDir, ProviderDefault)
		assert.NoError(err)

		// Feed the prompt a project name, a docroot that doesn't exist, and a
		// refusal to create it.
		projectName := uuid.New().String()
		missingDocroot := filepath.Join("does", "not", "exist")
		stdin := fmt.Sprintf("%s\n%s\nno", projectName, missingDocroot)
		util.SetInputScanner(bufio.NewScanner(strings.NewReader(stdin)))

		// The prompt must fail and explain why.
		err = app.PromptForConfig()
		assert.Error(err, t)
		assert.Contains(err.Error(), "docroot must exist to continue configuration")

		err = PrepDdevDirectory(tmpDir)
		assert.NoError(err)
	}
}
// TestConfigCommandCreateDocrootAllowed checks that agreeing to create a
// missing docroot lets interactive configuration finish successfully.
func TestConfigCommandCreateDocrootAllowed(t *testing.T) {
	assert := asrt.New(t)

	const apptypePos = 0
	const phpVersionPos = 1
	testMatrix := map[string][]string{
		"drupal6phpversion": {AppTypeDrupal6, PHP56},
		"drupal7phpversion": {AppTypeDrupal7, PHP71},
		"drupal8phpversion": {AppTypeDrupal8, PHP71},
	}

	for testName, testValues := range testMatrix {
		tmpDir := testcommon.CreateTmpDir(t.Name() + testName)

		// testcommon.Chdir()() and CleanupDir() check their own errors (and exit).
		defer testcommon.CleanupDir(tmpDir)
		defer testcommon.Chdir(tmpDir)()

		// No configuration exists yet, so this cannot fail.
		app, err := NewApp(tmpDir, ProviderDefault)
		assert.NoError(err)

		// Feed the prompt a project name, a docroot that doesn't exist, consent
		// to create it, and a valid app type.
		projectName := uuid.New().String()
		missingDocroot := filepath.Join("does", "not", "exist")
		stdin := fmt.Sprintf("%s\n%s\nyes\n%s", projectName, missingDocroot, testValues[apptypePos])
		util.SetInputScanner(bufio.NewScanner(strings.NewReader(stdin)))

		restoreOutput := util.CaptureUserOut()
		err = app.PromptForConfig()
		assert.NoError(err, t)
		out := restoreOutput()

		// The prompt should report that it created the docroot.
		assert.Contains(out, missingDocroot)
		assert.Contains(out, "Created docroot")

		// The app struct should reflect everything we typed.
		assert.Equal(projectName, app.Name)
		assert.Equal(testValues[apptypePos], app.Type)
		assert.Equal(missingDocroot, app.Docroot)
		assert.EqualValues(testValues[phpVersionPos], app.PHPVersion)

		err = PrepDdevDirectory(tmpDir)
		assert.NoError(err)
	}
}
// TestConfigCommandDocrootDetection asserts the default docroot is detected.
//
// For each known docroot location it creates the directory with an index.php
// and verifies PromptForConfig offers that location as the default.
func TestConfigCommandDocrootDetection(t *testing.T) {
	// Set up tests and give ourselves a working directory.
	assert := asrt.New(t)
	testMatrix := AvailableDocrootLocations()
	for index, testDocrootName := range testMatrix {
		testDir := testcommon.CreateTmpDir(fmt.Sprintf("TestConfigCommand_%v", index))

		// testcommon.Chdir()() and CleanupDir() checks their own errors (and exit)
		defer testcommon.CleanupDir(testDir)
		defer testcommon.Chdir(testDir)()

		// Create a document root folder.
		err := os.MkdirAll(filepath.Join(testDir, filepath.Join(testDocrootName)), 0755)
		if err != nil {
			t.Errorf("Could not create %s directory under %s", testDocrootName, testDir)
		}

		// Create an index.php so the docroot detector recognizes this folder.
		// Close the handle immediately: the original code discarded the *os.File
		// and leaked one file descriptor per docroot candidate.
		indexFile, err := os.OpenFile(filepath.Join(testDir, filepath.Join(testDocrootName), "index.php"), os.O_RDONLY|os.O_CREATE, 0664)
		assert.NoError(err)
		if err == nil {
			assert.NoError(indexFile.Close())
		}

		// Create the ddevapp we'll use for testing.
		// This will not return an error, since there is no existing configuration.
		app, err := NewApp(testDir, ProviderDefault)
		assert.NoError(err)

		// Randomize some values to use for Stdin during testing.
		name := strings.ToLower(util.RandString(16))

		// Create an example input buffer that writes the site name, accepts the
		// default document root and provides a valid app type.
		input := fmt.Sprintf("%s\n\ndrupal8", name)
		scanner := bufio.NewScanner(strings.NewReader(input))
		util.SetInputScanner(scanner)

		restoreOutput := util.CaptureStdOut()
		err = app.PromptForConfig()
		assert.NoError(err, t)
		out := restoreOutput()
		assert.Contains(out, fmt.Sprintf("Docroot Location (%s)", testDocrootName))

		// Ensure values were properly set on the app struct.
		assert.Equal(name, app.Name)
		assert.Equal(AppTypeDrupal8, app.Type)
		assert.Equal(testDocrootName, app.Docroot)

		err = PrepDdevDirectory(testDir)
		assert.NoError(err)
	}
}
// TestConfigCommandDocrootDetection asserts the default docroot is detected and has index.php.
// The `web` docroot check is before `docroot` this verifies the directory with an
// existing index.php is selected.
func TestConfigCommandDocrootDetectionIndexVerification(t *testing.T) {
	// Set up tests and give ourselves a working directory.
	assert := asrt.New(t)
	testDir := testcommon.CreateTmpDir("TestConfigCommand_testDocrootIndex")

	// testcommon.Chdir()() and CleanupDir() checks their own errors (and exit)
	defer testcommon.CleanupDir(testDir)
	defer testcommon.Chdir(testDir)()

	// Create two candidate document roots: "web" (checked first) and "docroot".
	err := os.MkdirAll(filepath.Join(testDir, filepath.Join("web")), 0755)
	if err != nil {
		t.Errorf("Could not create %s directory under %s", "web", testDir)
	}
	err = os.MkdirAll(filepath.Join(testDir, filepath.Join("docroot")), 0755)
	if err != nil {
		t.Errorf("Could not create %s directory under %s", "docroot", testDir)
	}

	// Only "docroot" gets an index.php, so detection should prefer it even
	// though "web" is checked first. Close the handle right away: the original
	// code discarded the *os.File and leaked the file descriptor.
	indexFile, err := os.OpenFile(filepath.Join(testDir, "docroot", "index.php"), os.O_RDONLY|os.O_CREATE, 0664)
	assert.NoError(err)
	if err == nil {
		assert.NoError(indexFile.Close())
	}

	// Create the ddevapp we'll use for testing.
	// This will not return an error, since there is no existing configuration.
	app, err := NewApp(testDir, ProviderDefault)
	assert.NoError(err)

	// Randomize some values to use for Stdin during testing.
	name := strings.ToLower(util.RandString(16))

	// Create an example input buffer that writes the site name, accepts the
	// default document root and provides a valid app type.
	input := fmt.Sprintf("%s\n\ndrupal8", name)
	scanner := bufio.NewScanner(strings.NewReader(input))
	util.SetInputScanner(scanner)

	restoreOutput := util.CaptureStdOut()
	err = app.PromptForConfig()
	assert.NoError(err, t)
	out := restoreOutput()
	assert.Contains(out, fmt.Sprintf("Docroot Location (%s)", "docroot"))

	// Ensure values were properly set on the app struct.
	assert.Equal(name, app.Name)
	assert.Equal(AppTypeDrupal8, app.Type)
	assert.Equal("docroot", app.Docroot)

	err = PrepDdevDirectory(testDir)
	assert.NoError(err)
}
// TestReadConfig tests reading config values from file and fallback to defaults for values not exposed.
func TestReadConfig(t *testing.T) {
	assert := asrt.New(t)

	// Mirror the state NewApp() would produce, pointed at the test fixture.
	app := &DdevApp{
		APIVersion: version.DdevVersion,
		ConfigPath: filepath.Join("testdata", "config.yaml"),
		AppRoot:    "testdata",
		Name:       "TestRead",
		Provider:   ProviderDefault,
	}

	if err := app.ReadConfig(); err != nil {
		t.Fatalf("Unable to c.ReadConfig(), err: %v", err)
	}

	// Fields absent from the file keep their defaults...
	assert.Equal(app.Name, "TestRead")
	assert.Equal(app.APIVersion, version.DdevVersion)

	// ...while fields present in the file are taken from it.
	assert.Equal(app.Type, AppTypeDrupal8)
	assert.Equal(app.Docroot, "test")
	assert.Equal(app.WebImage, "test/testimage:latest")
}
// TestValidate tests validation of configuration values.
//
// Starts from a fully valid configuration, then mutates one field at a time,
// asserts the expected validation error, and restores the field before the
// next case — so the statement order below is significant.
func TestValidate(t *testing.T) {
	assert := asrt.New(t)
	cwd, err := os.Getwd()
	assert.NoError(err)

	// A configuration that should pass validation as-is.
	app := &DdevApp{
		Name: "TestValidate",
		ConfigPath: filepath.Join("testdata", "config.yaml"),
		AppRoot: cwd,
		Docroot: "testdata",
		Type: AppTypeWordPress,
		PHPVersion: PHPDefault,
		WebserverType: WebserverDefault,
		Provider: ProviderDefault,
	}

	// Baseline: the valid config must validate cleanly.
	err = app.ValidateConfig()
	if err != nil {
		t.Fatalf("Failed to app.ValidateConfig(), err=%v", err)
	}

	// A project name with punctuation is rejected.
	app.Name = "Invalid!"
	err = app.ValidateConfig()
	assert.Error(err)
	assert.Contains(err.Error(), "not a valid project name")

	// An unknown app type is rejected.
	app.Docroot = "testdata"
	app.Name = "valid"
	app.Type = "potato"
	err = app.ValidateConfig()
	assert.Error(err)
	assert.Contains(err.Error(), "invalid app type")

	// An unsupported PHP version is rejected.
	app.Type = AppTypeWordPress
	app.PHPVersion = "1.1"
	err = app.ValidateConfig()
	assert.Error(err)
	assert.Contains(err.Error(), "invalid PHP")

	// An unknown webserver type is rejected.
	app.PHPVersion = PHPDefault
	app.WebserverType = "server"
	err = app.ValidateConfig()
	assert.Error(err)
	assert.Contains(err.Error(), "invalid webserver type")

	// Additional hostnames must all be valid hostnames.
	app.WebserverType = WebserverDefault
	app.AdditionalHostnames = []string{"good", "b@d"}
	err = app.ValidateConfig()
	assert.Error(err)
	assert.Contains(err.Error(), "invalid hostname")

	// Additional FQDNs must all be valid hostnames too.
	app.AdditionalHostnames = []string{}
	app.AdditionalFQDNs = []string{"good.com", "[email protected]"}
	err = app.ValidateConfig()
	assert.Error(err)
	assert.Contains(err.Error(), "invalid hostname")
}
// TestWriteConfig tests writing config values to file
func TestWriteConfig(t *testing.T) {
	assert := asrt.New(t)
	tmpDir := testcommon.CreateTmpDir("TestConfigWrite")

	// Mirror the state NewApp() would produce.
	app := &DdevApp{
		ConfigPath: filepath.Join(tmpDir, "config.yaml"),
		AppRoot:    tmpDir,
		APIVersion: version.DdevVersion,
		Name:       "TestWrite",
		WebImage:   version.WebImg + ":" + version.WebTag,
		DBImage:    version.DBImg + ":" + version.DBTag,
		DBAImage:   version.DBAImg + ":" + version.DBATag,
		Type:       AppTypeDrupal8,
		Provider:   ProviderDefault,
	}

	configPath := filepath.Join(tmpDir, "config.yaml")

	// A drupal8 config should carry the project name and the drush hook.
	assert.NoError(app.WriteConfig())
	contents, err := ioutil.ReadFile(configPath)
	assert.NoError(err)
	assert.Contains(string(contents), "TestWrite")
	assert.Contains(string(contents), `exec: drush cr`)

	// Switching to wordpress should swap in the wp-cli hook.
	app.Type = AppTypeWordPress
	assert.NoError(app.WriteConfig())
	contents, err = ioutil.ReadFile(configPath)
	assert.NoError(err)
	assert.Contains(string(contents), `- exec: wp cli version`)

	assert.NoError(os.RemoveAll(tmpDir))
}
// TestConfigOverrideDetection tests to make sure we tell them about config overrides.
//
// Copies a fixture .ddev directory (containing custom nginx/apache/php/mysql
// override files) into a fresh project, starts the app, and checks the
// captured output mentions each override.
func TestConfigOverrideDetection(t *testing.T) {
	assert := asrt.New(t)

	// Copy the fixture .ddev directory into a fresh temporary project.
	testcommon.ClearDockerEnv()
	testDir := testcommon.CreateTmpDir("TestConfigOverrideDetection")
	targetDdev := filepath.Join(testDir, ".ddev")
	err := fileutil.CopyDir("testdata/.ddev", targetDdev)
	assert.NoError(err)

	// testcommon.Chdir()() and CleanupDir() checks their own errors (and exit)
	defer testcommon.CleanupDir(testDir)
	defer testcommon.Chdir(testDir)()

	app, err := NewApp(testDir, ProviderDefault)
	assert.NoError(err)
	err = app.ReadConfig()
	assert.NoError(err)

	// Start the app while capturing user-facing output so we can inspect the
	// override warnings it prints.
	restoreOutput := util.CaptureUserOut()
	err = app.Start()
	out := restoreOutput()

	// If the ssh-agent container failed to come up, dump its docker logs for
	// debugging before the require below fails the test.
	if strings.Contains(out, "ddev-ssh-agent failed to become ready") {
		dockerLogs, err := exec.RunCommand("docker", []string{"logs", "ddev-ssh-agent"})
		assert.NoError(err)
		t.Logf("ddev-ssh-agent failed to become ready, docker logs:===\n%s\n===", dockerLogs)
	}

	require.NoError(t, err, "app.Start() did not succeed: output:===\n%s\n===", out)

	// Output should mention the copied-in mysql and php overrides.
	assert.Contains(out, "utf.cnf")
	assert.Contains(out, "my-php.ini")

	// Only the override matching the configured webserver type should appear.
	switch app.WebserverType {
	case WebserverNginxFPM:
		assert.Contains(out, "nginx-site.conf")
		assert.NotContains(out, "apache-site.conf")
	default:
		assert.Contains(out, "apache-site.conf")
		assert.NotContains(out, "nginx-site.conf")
	}
	assert.Contains(out, "Custom configuration takes effect")

	// Best-effort teardown; the error is intentionally ignored.
	_ = app.Down(true, false)
}
| 1 | 13,217 | I was confused by this env name variable, assuming it was the value, not the name. Silly nit, but maybe name it noninteractiveEnv? | drud-ddev | php |
@@ -56,9 +56,15 @@ public class TableProperties {
public static final String PARQUET_COMPRESSION = "write.parquet.compression-codec";
public static final String PARQUET_COMPRESSION_DEFAULT = "gzip";
+ public static final String PARQUET_WRITE_MODE = "write.parquet.write-mode";
+ public static final String PARQUET_WRITE_MODE_DEFAULT = "overwrite";
+
public static final String AVRO_COMPRESSION = "write.avro.compression-codec";
public static final String AVRO_COMPRESSION_DEFAULT = "gzip";
+ public static final String AVRO_WRITE_MODE = "write.avro.write-mode";
+ public static final String AVRO_WRITE_MODE_DEFAULT = "overwrite";
+
public static final String SPLIT_SIZE = "read.split.target-size";
public static final long SPLIT_SIZE_DEFAULT = 134217728; // 128 MB
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
/**
 * Property keys and default values for table-level configuration.
 *
 * <p>Keys are grouped by concern: commit retry behavior, manifest merging,
 * file-format tuning, read split planning, and write-location layout. Each
 * {@code *_DEFAULT} constant is the value used when the corresponding key is
 * not present in the table's properties.
 */
public class TableProperties {
  // Constants-only holder; never instantiated.
  private TableProperties() {}

  // --- Commit retry behavior ---

  public static final String COMMIT_NUM_RETRIES = "commit.retry.num-retries";
  public static final int COMMIT_NUM_RETRIES_DEFAULT = 4;

  public static final String COMMIT_MIN_RETRY_WAIT_MS = "commit.retry.min-wait-ms";
  public static final int COMMIT_MIN_RETRY_WAIT_MS_DEFAULT = 100;

  public static final String COMMIT_MAX_RETRY_WAIT_MS = "commit.retry.max-wait-ms";
  public static final int COMMIT_MAX_RETRY_WAIT_MS_DEFAULT = 60000; // 1 minute

  public static final String COMMIT_TOTAL_RETRY_TIME_MS = "commit.retry.total-timeout-ms";
  public static final int COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT = 1800000; // 30 minutes

  // --- Manifest merging on commit ---

  public static final String MANIFEST_TARGET_SIZE_BYTES = "commit.manifest.target-size-bytes";
  public static final long MANIFEST_TARGET_SIZE_BYTES_DEFAULT = 8388608; // 8 MB

  public static final String MANIFEST_MIN_MERGE_COUNT = "commit.manifest.min-count-to-merge";
  public static final int MANIFEST_MIN_MERGE_COUNT_DEFAULT = 100;

  // --- Default write file format ---

  public static final String DEFAULT_FILE_FORMAT = "write.format.default";
  public static final String DEFAULT_FILE_FORMAT_DEFAULT = "parquet";

  // --- Parquet writer tuning ---

  public static final String PARQUET_ROW_GROUP_SIZE_BYTES = "write.parquet.row-group-size-bytes";
  public static final String PARQUET_ROW_GROUP_SIZE_BYTES_DEFAULT = "134217728"; // 128 MB

  public static final String PARQUET_PAGE_SIZE_BYTES = "write.parquet.page-size-bytes";
  public static final String PARQUET_PAGE_SIZE_BYTES_DEFAULT = "1048576"; // 1 MB

  public static final String PARQUET_DICT_SIZE_BYTES = "write.parquet.dict-size-bytes";
  public static final String PARQUET_DICT_SIZE_BYTES_DEFAULT = "2097152"; // 2 MB

  public static final String PARQUET_COMPRESSION = "write.parquet.compression-codec";
  public static final String PARQUET_COMPRESSION_DEFAULT = "gzip";

  // --- Avro writer tuning ---

  public static final String AVRO_COMPRESSION = "write.avro.compression-codec";
  public static final String AVRO_COMPRESSION_DEFAULT = "gzip";

  // --- Read split planning ---

  public static final String SPLIT_SIZE = "read.split.target-size";
  public static final long SPLIT_SIZE_DEFAULT = 134217728; // 128 MB

  public static final String SPLIT_LOOKBACK = "read.split.planning-lookback";
  public static final int SPLIT_LOOKBACK_DEFAULT = 10;

  public static final String SPLIT_OPEN_FILE_COST = "read.split.open-file-cost";
  public static final long SPLIT_OPEN_FILE_COST_DEFAULT = 4 * 1024 * 1024; // 4MB

  // --- Write-location layout ---

  public static final String OBJECT_STORE_ENABLED = "write.object-storage.enabled";
  public static final boolean OBJECT_STORE_ENABLED_DEFAULT = false;

  public static final String OBJECT_STORE_PATH = "write.object-storage.path";

  // This only applies to files written after this property is set. Files previously written aren't
  // relocated to reflect this parameter.
  // If not set, defaults to a "data" folder underneath the root path of the table.
  public static final String WRITE_NEW_DATA_LOCATION = "write.folder-storage.path";

  // This only applies to files written after this property is set. Files previously written aren't
  // relocated to reflect this parameter.
  // If not set, defaults to a "metadata" folder underneath the root path of the table.
  public static final String WRITE_METADATA_LOCATION = "write.metadata.path";

  public static final String MANIFEST_LISTS_ENABLED = "write.manifest-lists.enabled";
  public static final boolean MANIFEST_LISTS_ENABLED_DEFAULT = true;

  public static final String METADATA_COMPRESSION = "write.metadata.compression-codec";
  public static final String METADATA_COMPRESSION_DEFAULT = "none";

  public static final String DEFAULT_WRITE_METRICS_MODE = "write.metadata.metrics.default";
  public static final String DEFAULT_WRITE_METRICS_MODE_DEFAULT = "truncate(16)";
}
| 1 | 14,524 | nice, having the "overwrite" default makes this change backwards compatible, right? | apache-iceberg | java |
@@ -108,7 +108,7 @@ public class DefaultReactiveOAuth2UserService implements ReactiveOAuth2UserServi
authenticationMethod);
// @formatter:off
Mono<Map<String, Object>> userAttributes = requestHeadersSpec.retrieve()
- .onStatus((s) -> s != HttpStatus.OK, (response) ->
+ .onStatus((s) -> !s.is2xxSuccessful(), (response) ->
parse(response)
.map((userInfoErrorResponse) -> {
String description = userInfoErrorResponse.getErrorObject().getDescription(); | 1 | /*
* Copyright 2002-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.security.oauth2.client.userinfo;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import com.nimbusds.oauth2.sdk.ErrorObject;
import com.nimbusds.openid.connect.sdk.UserInfoErrorResponse;
import net.minidev.json.JSONObject;
import reactor.core.publisher.Mono;
import org.springframework.core.ParameterizedTypeReference;
import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.security.core.GrantedAuthority;
import org.springframework.security.core.authority.SimpleGrantedAuthority;
import org.springframework.security.oauth2.core.AuthenticationMethod;
import org.springframework.security.oauth2.core.OAuth2AccessToken;
import org.springframework.security.oauth2.core.OAuth2AuthenticationException;
import org.springframework.security.oauth2.core.OAuth2Error;
import org.springframework.security.oauth2.core.user.DefaultOAuth2User;
import org.springframework.security.oauth2.core.user.OAuth2User;
import org.springframework.security.oauth2.core.user.OAuth2UserAuthority;
import org.springframework.util.Assert;
import org.springframework.util.StringUtils;
import org.springframework.web.reactive.function.UnsupportedMediaTypeException;
import org.springframework.web.reactive.function.client.ClientResponse;
import org.springframework.web.reactive.function.client.WebClient;
/**
* An implementation of an {@link ReactiveOAuth2UserService} that supports standard OAuth
* 2.0 Provider's.
* <p>
* For standard OAuth 2.0 Provider's, the attribute name used to access the user's name
* from the UserInfo response is required and therefore must be available via
* {@link org.springframework.security.oauth2.client.registration.ClientRegistration.ProviderDetails.UserInfoEndpoint#getUserNameAttributeName()
* UserInfoEndpoint.getUserNameAttributeName()}.
* <p>
* <b>NOTE:</b> Attribute names are <b>not</b> standardized between providers and
* therefore will vary. Please consult the provider's API documentation for the set of
* supported user attribute names.
*
* @author Rob Winch
* @since 5.1
* @see ReactiveOAuth2UserService
* @see OAuth2UserRequest
* @see OAuth2User
* @see DefaultOAuth2User
*/
public class DefaultReactiveOAuth2UserService implements ReactiveOAuth2UserService<OAuth2UserRequest, OAuth2User> {

	// Error codes placed on the OAuth2Error when user-info retrieval fails.
	private static final String INVALID_USER_INFO_RESPONSE_ERROR_CODE = "invalid_user_info_response";

	private static final String MISSING_USER_INFO_URI_ERROR_CODE = "missing_user_info_uri";

	private static final String MISSING_USER_NAME_ATTRIBUTE_ERROR_CODE = "missing_user_name_attribute";

	// Type token used to decode the UserInfo success response body.
	private static final ParameterizedTypeReference<Map<String, Object>> STRING_OBJECT_MAP = new ParameterizedTypeReference<Map<String, Object>>() {
	};

	// Type token used to decode an error response body.
	private static final ParameterizedTypeReference<Map<String, String>> STRING_STRING_MAP = new ParameterizedTypeReference<Map<String, String>>() {
	};

	private WebClient webClient = WebClient.create();

	/**
	 * Fetches the user's attributes from the client registration's UserInfo endpoint
	 * and wraps them in a {@link DefaultOAuth2User}.
	 * <p>
	 * Fails with an {@link OAuth2AuthenticationException} when the registration is
	 * missing the UserInfo URI or the user-name attribute name, when the endpoint
	 * returns a non-OK status, or when the response cannot be read as JSON.
	 * @param userRequest the request carrying the client registration and access token
	 * @return a {@link Mono} emitting the loaded {@link OAuth2User}
	 */
	@Override
	public Mono<OAuth2User> loadUser(OAuth2UserRequest userRequest) throws OAuth2AuthenticationException {
		return Mono.defer(() -> {
			Assert.notNull(userRequest, "userRequest cannot be null");
			String userInfoUri = userRequest.getClientRegistration().getProviderDetails().getUserInfoEndpoint()
					.getUri();
			// A UserInfo URI is mandatory; without it no attributes can be fetched.
			if (!StringUtils.hasText(userInfoUri)) {
				OAuth2Error oauth2Error = new OAuth2Error(MISSING_USER_INFO_URI_ERROR_CODE,
						"Missing required UserInfo Uri in UserInfoEndpoint for Client Registration: "
								+ userRequest.getClientRegistration().getRegistrationId(),
						null);
				throw new OAuth2AuthenticationException(oauth2Error, oauth2Error.toString());
			}
			String userNameAttributeName = userRequest.getClientRegistration().getProviderDetails()
					.getUserInfoEndpoint().getUserNameAttributeName();
			// The attribute holding the user's name is provider-specific and required.
			if (!StringUtils.hasText(userNameAttributeName)) {
				OAuth2Error oauth2Error = new OAuth2Error(MISSING_USER_NAME_ATTRIBUTE_ERROR_CODE,
						"Missing required \"user name\" attribute name in UserInfoEndpoint for Client Registration: "
								+ userRequest.getClientRegistration().getRegistrationId(),
						null);
				throw new OAuth2AuthenticationException(oauth2Error, oauth2Error.toString());
			}
			AuthenticationMethod authenticationMethod = userRequest.getClientRegistration().getProviderDetails()
					.getUserInfoEndpoint().getAuthenticationMethod();
			WebClient.RequestHeadersSpec<?> requestHeadersSpec = getRequestHeaderSpec(userRequest, userInfoUri,
					authenticationMethod);
			// @formatter:off
			// Any status other than 200 OK is parsed as a UserInfo error response
			// and surfaced as an OAuth2AuthenticationException.
			Mono<Map<String, Object>> userAttributes = requestHeadersSpec.retrieve()
					.onStatus((s) -> s != HttpStatus.OK, (response) ->
						parse(response)
							.map((userInfoErrorResponse) -> {
								String description = userInfoErrorResponse.getErrorObject().getDescription();
								OAuth2Error oauth2Error = new OAuth2Error(INVALID_USER_INFO_RESPONSE_ERROR_CODE, description,
										null);
								throw new OAuth2AuthenticationException(oauth2Error, oauth2Error.toString());
							})
					)
					.bodyToMono(DefaultReactiveOAuth2UserService.STRING_OBJECT_MAP);
			return userAttributes.map((attrs) -> {
				// Grant one OAuth2UserAuthority carrying the attributes plus one
				// SCOPE_* authority per scope on the access token.
				GrantedAuthority authority = new OAuth2UserAuthority(attrs);
				Set<GrantedAuthority> authorities = new HashSet<>();
				authorities.add(authority);
				OAuth2AccessToken token = userRequest.getAccessToken();
				for (String scope : token.getScopes()) {
					authorities.add(new SimpleGrantedAuthority("SCOPE_" + scope));
				}
				return new DefaultOAuth2User(authorities, attrs, userNameAttributeName);
			})
			// A non-JSON content type means the endpoint is misconfigured; explain how to fix it.
			.onErrorMap((ex) -> (ex instanceof UnsupportedMediaTypeException ||
					ex.getCause() instanceof UnsupportedMediaTypeException), (ex) -> {
				String contentType = (ex instanceof UnsupportedMediaTypeException) ?
						((UnsupportedMediaTypeException) ex).getContentType().toString() :
						((UnsupportedMediaTypeException) ex.getCause()).getContentType().toString();
				String errorMessage = "An error occurred while attempting to retrieve the UserInfo Resource from '"
						+ userRequest.getClientRegistration().getProviderDetails().getUserInfoEndpoint()
								.getUri()
						+ "': response contains invalid content type '" + contentType + "'. "
						+ "The UserInfo Response should return a JSON object (content type 'application/json') "
						+ "that contains a collection of name and value pairs of the claims about the authenticated End-User. "
						+ "Please ensure the UserInfo Uri in UserInfoEndpoint for Client Registration '"
						+ userRequest.getClientRegistration().getRegistrationId()
						+ "' conforms to the UserInfo Endpoint, "
						+ "as defined in OpenID Connect 1.0: 'https://openid.net/specs/openid-connect-core-1_0.html#UserInfo'";
				OAuth2Error oauth2Error = new OAuth2Error(INVALID_USER_INFO_RESPONSE_ERROR_CODE, errorMessage,
						null);
				throw new OAuth2AuthenticationException(oauth2Error, oauth2Error.toString(), ex);
			})
			// Any other failure while reading the response is wrapped with a generic error code.
			.onErrorMap((ex) -> {
				OAuth2Error oauth2Error = new OAuth2Error(INVALID_USER_INFO_RESPONSE_ERROR_CODE,
						"An error occurred reading the UserInfo response: " + ex.getMessage(), null);
				return new OAuth2AuthenticationException(oauth2Error, oauth2Error.toString(), ex);
			});
		});
		// @formatter:on
	}

	/**
	 * Builds the WebClient request for the UserInfo endpoint: FORM authentication
	 * POSTs the access token as a form body; otherwise the token is sent as a
	 * Bearer Authorization header on a GET.
	 */
	private WebClient.RequestHeadersSpec<?> getRequestHeaderSpec(OAuth2UserRequest userRequest, String userInfoUri,
			AuthenticationMethod authenticationMethod) {
		if (AuthenticationMethod.FORM.equals(authenticationMethod)) {
			// @formatter:off
			return this.webClient.post()
					.uri(userInfoUri)
					.header(HttpHeaders.ACCEPT, MediaType.APPLICATION_JSON_VALUE)
					.header(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_FORM_URLENCODED_VALUE)
					.bodyValue("access_token=" + userRequest.getAccessToken().getTokenValue());
			// @formatter:on
		}
		// @formatter:off
		return this.webClient.get()
				.uri(userInfoUri)
				.header(HttpHeaders.ACCEPT, MediaType.APPLICATION_JSON_VALUE)
				.headers((headers) -> headers
						.setBearerAuth(userRequest.getAccessToken().getTokenValue())
				);
		// @formatter:on
	}

	/**
	 * Sets the {@link WebClient} used for retrieving the user endpoint
	 * @param webClient the client to use
	 */
	public void setWebClient(WebClient webClient) {
		Assert.notNull(webClient, "webClient cannot be null");
		this.webClient = webClient;
	}

	/**
	 * Parses an error response from the UserInfo endpoint, preferring the
	 * WWW-Authenticate header (bearer token errors) over the JSON body.
	 */
	private static Mono<UserInfoErrorResponse> parse(ClientResponse httpResponse) {
		String wwwAuth = httpResponse.headers().asHttpHeaders().getFirst(HttpHeaders.WWW_AUTHENTICATE);
		if (!StringUtils.isEmpty(wwwAuth)) {
			// Bearer token error?
			return Mono.fromCallable(() -> UserInfoErrorResponse.parse(wwwAuth));
		}
		// Other error?
		return httpResponse.bodyToMono(STRING_STRING_MAP)
				.map((body) -> new UserInfoErrorResponse(ErrorObject.parse(new JSONObject(body))));
	}

}
| 1 | 16,794 | It doesn't make sense to accept all 2xx status codes. For example, `203 Non-Authoritative Information` or `205 Reset Content`, etc. | spring-projects-spring-security | java
@@ -385,16 +385,8 @@ class SplitOp implements CoreAdminHandler.CoreAdminOp {
// compare to current prefix bucket and see if this new term shares the same prefix
if (term != null && term.length >= currPrefix.length && currPrefix.length > 0) {
- int i = 0;
- for (; i < currPrefix.length; i++) {
- if (currPrefix.bytes[i] != term.bytes[term.offset + i]) {
- break;
- }
- }
-
- if (i == currPrefix.length) {
- // prefix was the same (common-case fast path)
- // int count = termsEnum.docFreq();
+ BytesRef termPrefix = new BytesRef(term.bytes, term.offset, currPrefix.length);
+ if (termPrefix.bytesEquals(currPrefix)) {
bucketCount++; // use 1 since we are dealing with unique ids
continue;
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.handler.admin;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;
import org.apache.solr.cloud.CloudDescriptor;
import org.apache.solr.cloud.ZkShardTerms;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.CompositeIdRouter;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.DocRouter;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.params.CommonAdminParams;
import org.apache.solr.common.params.CoreAdminParams;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.core.SolrCore;
import org.apache.solr.request.LocalSolrQueryRequest;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.update.SolrIndexSplitter;
import org.apache.solr.update.SplitIndexCommand;
import org.apache.solr.util.RTimer;
import org.apache.solr.util.RefCounted;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.solr.common.cloud.DocCollection.DOC_ROUTER;
import static org.apache.solr.common.params.CommonParams.PATH;
import static org.apache.solr.common.params.CoreAdminParams.GET_RANGES;
class SplitOp implements CoreAdminHandler.CoreAdminOp {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
@Override
public void execute(CoreAdminHandler.CallInfo it) throws Exception {
SolrParams params = it.req.getParams();
String splitKey = params.get("split.key");
String[] newCoreNames = params.getParams("targetCore");
String cname = params.get(CoreAdminParams.CORE, "");
if ( params.getBool(GET_RANGES, false) ) {
handleGetRanges(it, cname);
return;
}
List<DocRouter.Range> ranges = null;
String[] pathsArr = params.getParams(PATH);
String rangesStr = params.get(CoreAdminParams.RANGES); // ranges=a-b,c-d,e-f
if (rangesStr != null) {
String[] rangesArr = rangesStr.split(",");
if (rangesArr.length == 0) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "There must be at least one range specified to split an index");
} else {
ranges = new ArrayList<>(rangesArr.length);
for (String r : rangesArr) {
try {
ranges.add(DocRouter.DEFAULT.fromString(r));
} catch (Exception e) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Exception parsing hexadecimal hash range: " + r, e);
}
}
}
}
if ((pathsArr == null || pathsArr.length == 0) && (newCoreNames == null || newCoreNames.length == 0)) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Either path or targetCore param must be specified");
}
log.info("Invoked split action for core: " + cname);
String methodStr = params.get(CommonAdminParams.SPLIT_METHOD, SolrIndexSplitter.SplitMethod.REWRITE.toLower());
SolrIndexSplitter.SplitMethod splitMethod = SolrIndexSplitter.SplitMethod.get(methodStr);
if (splitMethod == null) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unsupported value of '" + CommonAdminParams.SPLIT_METHOD + "': " + methodStr);
}
SolrCore parentCore = it.handler.coreContainer.getCore(cname);
List<SolrCore> newCores = null;
SolrQueryRequest req = null;
try {
// TODO: allow use of rangesStr in the future
List<String> paths = null;
int partitions = pathsArr != null ? pathsArr.length : newCoreNames.length;
DocRouter router = null;
String routeFieldName = null;
if (it.handler.coreContainer.isZooKeeperAware()) {
ClusterState clusterState = it.handler.coreContainer.getZkController().getClusterState();
String collectionName = parentCore.getCoreDescriptor().getCloudDescriptor().getCollectionName();
DocCollection collection = clusterState.getCollection(collectionName);
String sliceName = parentCore.getCoreDescriptor().getCloudDescriptor().getShardId();
Slice slice = collection.getSlice(sliceName);
router = collection.getRouter() != null ? collection.getRouter() : DocRouter.DEFAULT;
if (ranges == null) {
DocRouter.Range currentRange = slice.getRange();
ranges = currentRange != null ? router.partitionRange(partitions, currentRange) : null;
}
Object routerObj = collection.get(DOC_ROUTER); // for back-compat with Solr 4.4
if (routerObj instanceof Map) {
Map routerProps = (Map) routerObj;
routeFieldName = (String) routerProps.get("field");
}
}
if (pathsArr == null) {
newCores = new ArrayList<>(partitions);
for (String newCoreName : newCoreNames) {
SolrCore newcore = it.handler.coreContainer.getCore(newCoreName);
if (newcore != null) {
newCores.add(newcore);
if (it.handler.coreContainer.isZooKeeperAware()) {
// this core must be the only replica in its shard otherwise
// we cannot guarantee consistency between replicas because when we add data to this replica
CloudDescriptor cd = newcore.getCoreDescriptor().getCloudDescriptor();
ClusterState clusterState = it.handler.coreContainer.getZkController().getClusterState();
if (clusterState.getCollection(cd.getCollectionName()).getSlice(cd.getShardId()).getReplicas().size() != 1) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
"Core with core name " + newCoreName + " must be the only replica in shard " + cd.getShardId());
}
}
} else {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Core with core name " + newCoreName + " expected but doesn't exist.");
}
}
} else {
paths = Arrays.asList(pathsArr);
}
req = new LocalSolrQueryRequest(parentCore, params);
SplitIndexCommand cmd = new SplitIndexCommand(req, it.rsp, paths, newCores, ranges, router, routeFieldName, splitKey, splitMethod);
parentCore.getUpdateHandler().split(cmd);
if (it.handler.coreContainer.isZooKeeperAware()) {
for (SolrCore newcore : newCores) {
// the index of the core changed from empty to have some data, its term must be not zero
CloudDescriptor cd = newcore.getCoreDescriptor().getCloudDescriptor();
ZkShardTerms zkShardTerms = it.handler.coreContainer.getZkController().getShardTerms(cd.getCollectionName(), cd.getShardId());
zkShardTerms.ensureHighestTermsAreNotZero();
}
}
// After the split has completed, someone (here?) should start the process of replaying the buffered updates.
} catch (Exception e) {
log.error("ERROR executing split:", e);
throw e;
} finally {
if (req != null) req.close();
if (parentCore != null) parentCore.close();
if (newCores != null) {
for (SolrCore newCore : newCores) {
newCore.close();
}
}
}
}
/**
* This is called when splitByPrefix is used.
* The overseer called us to get recommended splits taking into
* account actual document distribution over the hash space.
*/
private void handleGetRanges(CoreAdminHandler.CallInfo it, String coreName) throws Exception {
SolrCore parentCore = it.handler.coreContainer.getCore(coreName);
if (parentCore == null) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown core " + coreName);
}
RefCounted<SolrIndexSearcher> searcherHolder = parentCore.getRealtimeSearcher();
try {
if (!it.handler.coreContainer.isZooKeeperAware()) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Shard splitByPrefix requires SolrCloud mode.");
} else {
SolrIndexSearcher searcher = searcherHolder.get();
String routeFieldName = null;
String prefixField = "id_prefix";
ClusterState clusterState = it.handler.coreContainer.getZkController().getClusterState();
String collectionName = parentCore.getCoreDescriptor().getCloudDescriptor().getCollectionName();
DocCollection collection = clusterState.getCollection(collectionName);
String sliceName = parentCore.getCoreDescriptor().getCloudDescriptor().getShardId();
Slice slice = collection.getSlice(sliceName);
DocRouter router = collection.getRouter() != null ? collection.getRouter() : DocRouter.DEFAULT;
DocRouter.Range currentRange = slice.getRange();
Object routerObj = collection.get(DOC_ROUTER); // for back-compat with Solr 4.4
if (routerObj instanceof Map) {
Map routerProps = (Map) routerObj;
routeFieldName = (String) routerProps.get("field");
}
if (routeFieldName == null) {
routeFieldName = searcher.getSchema().getUniqueKeyField().getName();
}
Collection<RangeCount> counts = getHashHistogram(searcher, prefixField, router, collection);
if (counts.size() == 0) {
// How to determine if we should look at the id field to figure out the prefix buckets?
// There may legitimately be no indexed terms in id_prefix if no ids have a prefix yet.
// For now, avoid using splitByPrefix unless you are actually using prefixes.
counts = getHashHistogramFromId(searcher, searcher.getSchema().getUniqueKeyField().getName(), router, collection);
}
Collection<DocRouter.Range> splits = getSplits(counts, currentRange);
String splitString = toSplitString(splits);
if (splitString == null) {
return;
}
it.rsp.add(CoreAdminParams.RANGES, splitString);
}
} finally {
if (searcherHolder != null) searcherHolder.decref();
if (parentCore != null) parentCore.close();
}
}
static class RangeCount implements Comparable<RangeCount> {
DocRouter.Range range;
int count;
public RangeCount(DocRouter.Range range, int count) {
this.range = range;
this.count = count;
}
@Override
public int hashCode() {
return range.hashCode();
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof RangeCount)) return false;
return this.range.equals( ((RangeCount)obj).range );
}
@Override
public int compareTo(RangeCount o) {
return this.range.compareTo(o.range);
}
@Override
public String toString() {
return range.toString() + "=" + count;
}
}
static String toSplitString(Collection<DocRouter.Range> splits) throws Exception {
if (splits == null) {
return null;
}
StringBuilder sb = new StringBuilder();
for (DocRouter.Range range : splits) {
if (sb.length() > 0) {
sb.append(",");
}
sb.append(range);
}
return sb.toString();
}
/*
* Returns a list of range counts sorted by the range lower bound
*/
static Collection<RangeCount> getHashHistogram(SolrIndexSearcher searcher, String prefixField, DocRouter router, DocCollection collection) throws IOException {
RTimer timer = new RTimer();
TreeMap<DocRouter.Range,RangeCount> counts = new TreeMap<>();
Terms terms = MultiTerms.getTerms(searcher.getIndexReader(), prefixField);
if (terms == null) {
return counts.values();
}
int numPrefixes = 0;
int numTriLevel = 0;
int numCollisions = 0;
long sumBuckets = 0;
TermsEnum termsEnum = terms.iterator();
BytesRef term;
while ((term = termsEnum.next()) != null) {
numPrefixes++;
String termStr = term.utf8ToString();
int firstSep = termStr.indexOf(CompositeIdRouter.SEPARATOR);
// truncate to first separator since we don't support multiple levels currently
// NOTE: this does not currently work for tri-level composite ids since the number of bits allocated to the first ID is 16 for a 2 part id
// and 8 for a 3 part id!
if (firstSep != termStr.length()-1 && firstSep > 0) {
numTriLevel++;
termStr = termStr.substring(0, firstSep+1);
}
DocRouter.Range range = router.getSearchRangeSingle(termStr, null, collection);
int numDocs = termsEnum.docFreq();
sumBuckets += numDocs;
RangeCount rangeCount = new RangeCount(range, numDocs);
RangeCount prev = counts.put(rangeCount.range, rangeCount);
if (prev != null) {
// we hit a hash collision or truncated a prefix to first level, so add the buckets together.
rangeCount.count += prev.count;
numCollisions++;
}
}
log.info("Split histogram: ms={}, numBuckets={} sumBuckets={} numPrefixes={} numTriLevel={} numCollisions={}", timer.getTime(), counts.size(), sumBuckets, numPrefixes, numTriLevel, numCollisions);
return counts.values();
}
/**
* Returns a list of range counts sorted by the range lower bound, using the indexed "id" field (i.e. the terms are full IDs, not just prefixes)
*/
static Collection<RangeCount> getHashHistogramFromId(SolrIndexSearcher searcher, String idField, DocRouter router, DocCollection collection) throws IOException {
RTimer timer = new RTimer();
TreeMap<DocRouter.Range, RangeCount> counts = new TreeMap<>();
Terms terms = MultiTerms.getTerms(searcher.getIndexReader(), idField);
if (terms == null) {
return counts.values();
}
int numPrefixes = 0;
int numCollisions = 0;
long sumBuckets = 0;
byte sep = (byte) CompositeIdRouter.SEPARATOR.charAt(0);
TermsEnum termsEnum = terms.iterator();
BytesRef currPrefix = new BytesRef(); // prefix of the previous "id" term
int bucketCount = 0; // count of the number of docs in the current bucket
// We're going to iterate over all terms, so do the minimum amount of work per term.
// Terms are sorted, so all terms sharing a prefix will be grouped together. The extra work
// is really just limited to stepping over all the terms in the id field.
for (;;) {
BytesRef term = termsEnum.next();
// compare to current prefix bucket and see if this new term shares the same prefix
if (term != null && term.length >= currPrefix.length && currPrefix.length > 0) {
int i = 0;
for (; i < currPrefix.length; i++) {
if (currPrefix.bytes[i] != term.bytes[term.offset + i]) {
break;
}
}
if (i == currPrefix.length) {
// prefix was the same (common-case fast path)
// int count = termsEnum.docFreq();
bucketCount++; // use 1 since we are dealing with unique ids
continue;
}
}
// At this point the prefix did not match, so if we had a bucket we were working on, record it.
if (currPrefix.length > 0) {
numPrefixes++;
sumBuckets += bucketCount;
String currPrefixStr = currPrefix.utf8ToString();
DocRouter.Range range = router.getSearchRangeSingle(currPrefixStr, null, collection);
RangeCount rangeCount = new RangeCount(range, bucketCount);
bucketCount = 0;
RangeCount prev = counts.put(rangeCount.range, rangeCount);
if (prev != null) {
// we hit a hash collision, so add the buckets together.
rangeCount.count += prev.count;
numCollisions++;
}
}
// if the current term is null, we ran out of values
if (term == null) break;
// find the new prefix (if any)
// resize if needed
if (currPrefix.length < term.length) {
currPrefix.bytes = new byte[term.length+10];
}
// Copy the bytes up to and including the separator, and set the length if the separator is found.
// If there was no separator, then length remains 0 and it's the indicator that we have no prefix bucket
currPrefix.length = 0;
for (int i=0; i<term.length; i++) {
byte b = term.bytes[i + term.offset];
currPrefix.bytes[i] = b;
if (b == sep) {
currPrefix.length = i + 1;
bucketCount++;
break;
}
}
}
log.info("Split histogram from idField {}: ms={}, numBuckets={} sumBuckets={} numPrefixes={}numCollisions={}", idField, timer.getTime(), counts.size(), sumBuckets, numPrefixes, numCollisions);
return counts.values();
}
/*
* Returns the list of recommended splits, or null if there is not enough information
*/
static Collection<DocRouter.Range> getSplits(Collection<RangeCount> rawCounts, DocRouter.Range currentRange) throws Exception {
int totalCount = 0;
RangeCount biggest = null; // keep track of the largest in case we need to split it out into it's own shard
RangeCount last = null; // keep track of what the last range is
// Remove counts that don't overlap with currentRange (can happen if someone overrode document routing)
List<RangeCount> counts = new ArrayList<>(rawCounts.size());
for (RangeCount rangeCount : rawCounts) {
if (!rangeCount.range.overlaps(currentRange)) {
continue;
}
totalCount += rangeCount.count;
if (biggest == null || rangeCount.count > biggest.count) {
biggest = rangeCount;
}
counts.add(rangeCount);
last = rangeCount;
}
if (counts.size() == 0) {
// we don't have any data to go off of, so do the split the normal way
return null;
}
List<DocRouter.Range> targetRanges = new ArrayList<>();
if (counts.size() == 1) {
// We have a single range, so we should split it.
// Currently, we only split a prefix/bucket when we have just one, but this could be changed/controlled
// in the future via a allowedSizeDifference parameter (i.e. if just separating prefix buckets results in
// too large of an imbalanced, allow splitting within a prefix)
// It may already be a partial range, so figure that out
int lower = Math.max(last.range.min, currentRange.min);
int upper = Math.min(last.range.max, currentRange.max);
int mid = lower + (upper-lower)/2;
if (mid == lower || mid == upper) {
// shard too small... this should pretty much never happen, but use default split logic if it does.
return null;
}
// Make sure to include the shard's current range in the new ranges so we don't create useless empty shards.
DocRouter.Range lowerRange = new DocRouter.Range(currentRange.min, mid);
DocRouter.Range upperRange = new DocRouter.Range(mid+1, currentRange.max);
targetRanges.add(lowerRange);
targetRanges.add(upperRange);
return targetRanges;
}
// We have at least two ranges, so we want to partition the ranges
// and avoid splitting any individual range.
// The "middle" bucket we are going to find will be included with the lower range and excluded from the upper range.
int targetCount = totalCount / 2;
RangeCount middle = null;
RangeCount prev = null;
int currCount = 0;
for (RangeCount rangeCount : counts) {
currCount += rangeCount.count;
if (currCount >= targetCount) { // this should at least be true on the last range
middle = rangeCount;
break;
}
prev = rangeCount;
}
// check if using the range before the middle one would make a better split point
int overError = currCount - targetCount; // error if we include middle in first split
int underError = targetCount - (currCount - middle.count); // error if we include middle in second split
if (underError < overError) {
middle = prev;
}
// The middle should never be the last, since that means that we won't actually do a split.
// Minimising the error (above) should already ensure this never happens.
assert middle != last;
// Make sure to include the shard's current range in the new ranges so we don't create useless empty shards.
DocRouter.Range lowerRange = new DocRouter.Range(currentRange.min, middle.range.max);
DocRouter.Range upperRange = new DocRouter.Range(middle.range.max+1, currentRange.max);
targetRanges.add(lowerRange);
targetRanges.add(upperRange);
return targetRanges;
}
}
| 1 | 30,161 | Found a better choice here that doesn't involve object creation: StringHelper.startsWith() | apache-lucene-solr | java |
@@ -250,8 +250,15 @@ func (s *nodeServer) attest(
attestedData *common.AttestedData, attestedBefore bool) (
response *nodeattestor.AttestResponse, err error) {
- // TODO: Pick the right node attestor [#222]
- nodeAttestor := s.catalog.NodeAttestors()[0]
+ // Pick the right node attestor
+ var nodeAttestor nodeattestor.NodeAttestor
+ for _, a := range s.catalog.NodeAttestors() {
+ mp := s.catalog.Find(a)
+ if mp.Config.PluginName == attestedData.Type {
+ nodeAttestor = a
+ break
+ }
+ }
attestRequest := &nodeattestor.AttestRequest{
AttestedData: attestedData, | 1 | package server
import (
"crypto/x509"
"errors"
"fmt"
"io"
"net/url"
"path"
"reflect"
"sort"
"strings"
"time"
"github.com/sirupsen/logrus"
"github.com/spiffe/go-spiffe/uri"
"github.com/spiffe/spire/pkg/common/selector"
"github.com/spiffe/spire/pkg/common/util"
"github.com/spiffe/spire/pkg/server/catalog"
"github.com/spiffe/spire/proto/api/node"
"github.com/spiffe/spire/proto/common"
"github.com/spiffe/spire/proto/server/ca"
"github.com/spiffe/spire/proto/server/datastore"
"github.com/spiffe/spire/proto/server/nodeattestor"
"golang.org/x/net/context"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/peer"
)
type nodeServer struct {
l logrus.FieldLogger
catalog catalog.Catalog
trustDomain url.URL
baseSVIDTtl int32
}
//FetchBaseSVID attests the node and gets the base node SVID.
func (s *nodeServer) FetchBaseSVID(
ctx context.Context, request *node.FetchBaseSVIDRequest) (
response *node.FetchBaseSVIDResponse, err error) {
serverCA := s.catalog.CAs()[0]
baseSpiffeIDFromCSR, err := getSpiffeIDFromCSR(request.Csr)
if err != nil {
s.l.Error(err)
return response, errors.New("Error trying to get SpiffeId from CSR")
}
attestedBefore, err := s.isAttested(baseSpiffeIDFromCSR)
if err != nil {
s.l.Error(err)
return response, errors.New("Error trying to check if attested")
}
// Join token is a special case
var attestResponse *nodeattestor.AttestResponse
if request.AttestedData.Type == "join_token" {
attestResponse, err = s.attestToken(request.AttestedData, attestedBefore)
} else {
attestResponse, err = s.attest(request.AttestedData, attestedBefore)
}
if err != nil {
s.l.Error(err)
return response, errors.New("Error trying to attest")
}
err = s.validateAttestation(baseSpiffeIDFromCSR, attestResponse)
if err != nil {
s.l.Error(err)
return response, errors.New("Error trying to validate attestation")
}
signResponse, err := serverCA.SignCsr(&ca.SignCsrRequest{Csr: request.Csr, Ttl: s.baseSVIDTtl})
if err != nil {
s.l.Error(err)
return response, errors.New("Error trying to sign CSR")
}
if attestedBefore {
err = s.updateAttestationEntry(signResponse.SignedCertificate, baseSpiffeIDFromCSR)
if err != nil {
s.l.Error(err)
return response, errors.New("Error trying to update attestation entry")
}
} else {
err = s.createAttestationEntry(signResponse.SignedCertificate, baseSpiffeIDFromCSR, request.AttestedData.Type)
if err != nil {
s.l.Error(err)
return response, errors.New("Error trying to create attestation entry")
}
}
selectors, err := s.resolveSelectors(baseSpiffeIDFromCSR)
if err != nil {
s.l.Error(err)
return response, errors.New("Error trying to get selectors for baseSpiffeID")
}
response, err = s.getFetchBaseSVIDResponse(
baseSpiffeIDFromCSR, signResponse.SignedCertificate, selectors)
if err != nil {
s.l.Error(err)
return response, errors.New("Error trying to compose response")
}
s.l.Info("Received node attestation request from ", baseSpiffeIDFromCSR,
" using strategy '", request.AttestedData.Type,
"' completed successfully. SVID issued with TTL=", s.baseSVIDTtl)
return response, nil
}
//FetchSVID gets Workload, Agent certs and CA trust bundles.
//Also used for rotation Base Node SVID or the Registered Node SVID used for this call.
//List can be empty to allow Node Agent cache refresh).
func (s *nodeServer) FetchSVID(server node.Node_FetchSVIDServer) (err error) {
for {
request, err := server.Recv()
if err == io.EOF {
return nil
}
if err != nil {
return err
}
peerCert, err := s.getCertFromCtx(server.Context())
if err != nil {
s.l.Error(err)
return errors.New("An SVID is required for this request")
}
uriNames, err := uri.GetURINamesFromCertificate(peerCert)
if err != nil {
s.l.Error(err)
return errors.New("An SPIFFE ID is required for this request")
}
ctxSpiffeID := uriNames[0]
selectors, err := s.getStoredSelectors(ctxSpiffeID)
if err != nil {
s.l.Error(err)
return errors.New("Error trying to get stored selectors")
}
regEntries, err := s.fetchRegistrationEntries(selectors, ctxSpiffeID)
if err != nil {
s.l.Error(err)
return errors.New("Error trying to get registration entries")
}
svids, err := s.signCSRs(peerCert, request.Csrs, regEntries)
if err != nil {
s.l.Error(err)
return errors.New("Error trying sign CSRs")
}
server.Send(&node.FetchSVIDResponse{
SvidUpdate: &node.SvidUpdate{
Svids: svids,
RegistrationEntries: regEntries,
},
})
}
}
//TODO
func (s *nodeServer) FetchCPBundle(
ctx context.Context, request *node.FetchCPBundleRequest) (
response *node.FetchCPBundleResponse, err error) {
return response, nil
}
//TODO
func (s *nodeServer) FetchFederatedBundle(
ctx context.Context, request *node.FetchFederatedBundleRequest) (
response *node.FetchFederatedBundleResponse, err error) {
return response, nil
}
func (s *nodeServer) fetchRegistrationEntries(selectors []*common.Selector, spiffeID string) (
[]*common.RegistrationEntry, error) {
dataStore := s.catalog.DataStores()[0]
///lookup Registration Entries for resolved selectors
var entries []*common.RegistrationEntry
var selectorsEntries []*common.RegistrationEntry
set := selector.NewSet(selectors)
reqs := []*datastore.ListSelectorEntriesRequest{}
for subset := range set.Power() {
reqs = append(reqs, &datastore.ListSelectorEntriesRequest{Selectors: subset.Raw()})
}
for _, req := range reqs {
listSelectorResponse, err := dataStore.ListSelectorEntries(req)
if err != nil {
return nil, err
}
selectorsEntries = append(selectorsEntries, listSelectorResponse.RegisteredEntryList...)
}
entries = append(entries, selectorsEntries...)
///lookup Registration Entries where spiffeID is the parent ID
listResponse, err := dataStore.ListParentIDEntries(&datastore.ListParentIDEntriesRequest{ParentId: spiffeID})
if err != nil {
return nil, err
}
///append parentEntries
for _, entry := range listResponse.RegisteredEntryList {
exists := false
sort.Slice(entry.Selectors, util.SelectorsSortFunction(entry.Selectors))
for _, oldEntry := range selectorsEntries {
sort.Slice(oldEntry.Selectors, util.SelectorsSortFunction(oldEntry.Selectors))
if reflect.DeepEqual(entry, oldEntry) {
exists = true
}
}
if !exists {
entries = append(entries, entry)
}
}
return entries, err
}
func (s *nodeServer) isAttested(baseSpiffeID string) (bool, error) {
dataStore := s.catalog.DataStores()[0]
fetchRequest := &datastore.FetchAttestedNodeEntryRequest{
BaseSpiffeId: baseSpiffeID,
}
fetchResponse, err := dataStore.FetchAttestedNodeEntry(fetchRequest)
if err != nil {
return false, err
}
attestedEntry := fetchResponse.AttestedNodeEntry
if attestedEntry != nil && attestedEntry.BaseSpiffeId == baseSpiffeID {
return true, nil
}
return false, nil
}
func (s *nodeServer) attest(
attestedData *common.AttestedData, attestedBefore bool) (
response *nodeattestor.AttestResponse, err error) {
// TODO: Pick the right node attestor [#222]
nodeAttestor := s.catalog.NodeAttestors()[0]
attestRequest := &nodeattestor.AttestRequest{
AttestedData: attestedData,
AttestedBefore: attestedBefore,
}
attestResponse, err := nodeAttestor.Attest(attestRequest)
if err != nil {
return nil, err
}
return attestResponse, nil
}
func (s *nodeServer) attestToken(
attestedData *common.AttestedData, attestedBefore bool) (
response *nodeattestor.AttestResponse, err error) {
if attestedBefore {
return nil, errors.New("join token has already been used")
}
ds := s.catalog.DataStores()[0]
req := &datastore.JoinToken{Token: string(attestedData.Data)}
t, err := ds.FetchToken(req)
if err != nil {
return nil, err
}
if t.Token == "" {
return nil, errors.New("invalid join token")
}
if time.Unix(t.Expiry, 0).Before(time.Now()) {
// Don't fail if we can't delete
_, _ = ds.DeleteToken(req)
return nil, errors.New("join token expired")
}
// If we're here, the token is valid
// Don't fail if we can't delete
_, _ = ds.DeleteToken(req)
id := &url.URL{
Scheme: s.trustDomain.Scheme,
Host: s.trustDomain.Host,
Path: path.Join("spire", "agent", "join_token", t.Token),
}
resp := &nodeattestor.AttestResponse{
Valid: true,
BaseSPIFFEID: id.String(),
}
return resp, nil
}
func (s *nodeServer) validateAttestation(
csrBaseSpiffeID string, attestResponse *nodeattestor.AttestResponse) error {
if !attestResponse.Valid {
return errors.New("Invalid")
}
//check if baseSPIFFEID in attest response matches with SPIFFEID in CSR
if attestResponse.BaseSPIFFEID != csrBaseSpiffeID {
return errors.New("BaseSPIFFEID Mismatch")
}
return nil
}
func (s *nodeServer) updateAttestationEntry(
certificate []byte, baseSPIFFEID string) error {
dataStore := s.catalog.DataStores()[0]
cert, err := x509.ParseCertificate(certificate)
if err != nil {
return err
}
updateRequest := &datastore.UpdateAttestedNodeEntryRequest{
BaseSpiffeId: baseSPIFFEID,
CertExpirationDate: cert.NotAfter.Format(time.RFC1123Z),
CertSerialNumber: cert.SerialNumber.String(),
}
_, err = dataStore.UpdateAttestedNodeEntry(updateRequest)
if err != nil {
return err
}
return nil
}
func (s *nodeServer) createAttestationEntry(
certificate []byte, baseSPIFFEID string, attestationType string) error {
dataStore := s.catalog.DataStores()[0]
cert, err := x509.ParseCertificate(certificate)
if err != nil {
return err
}
createRequest := &datastore.CreateAttestedNodeEntryRequest{
AttestedNodeEntry: &datastore.AttestedNodeEntry{
AttestedDataType: attestationType,
BaseSpiffeId: baseSPIFFEID,
CertExpirationDate: cert.NotAfter.Format(time.RFC1123Z),
CertSerialNumber: cert.SerialNumber.String(),
}}
_, err = dataStore.CreateAttestedNodeEntry(createRequest)
if err != nil {
return err
}
return nil
}
func (s *nodeServer) resolveSelectors(
baseSpiffeID string) ([]*common.Selector, error) {
dataStore := s.catalog.DataStores()[0]
nodeResolver := s.catalog.NodeResolvers()[0]
//Call node resolver plugin to get a map of spiffeID=>Selector
selectors, err := nodeResolver.Resolve([]string{baseSpiffeID})
if err != nil {
return nil, err
}
baseSelectors, ok := selectors[baseSpiffeID]
if ok {
// TODO: Fix complexity
for _, selector := range baseSelectors.Entries {
mapEntryRequest := &datastore.CreateNodeResolverMapEntryRequest{
NodeResolverMapEntry: &datastore.NodeResolverMapEntry{
BaseSpiffeId: baseSpiffeID,
Selector: selector,
},
}
_, err = dataStore.CreateNodeResolverMapEntry(mapEntryRequest)
if err != nil {
return nil, err
}
}
return baseSelectors.Entries, nil
}
return []*common.Selector{}, nil
}
func (s *nodeServer) getStoredSelectors(
baseSpiffeID string) ([]*common.Selector, error) {
dataStore := s.catalog.DataStores()[0]
req := &datastore.FetchNodeResolverMapEntryRequest{BaseSpiffeId: baseSpiffeID}
nodeResolutionResponse, err := dataStore.FetchNodeResolverMapEntry(req)
if err != nil {
return nil, err
}
var selectors []*common.Selector
for _, item := range nodeResolutionResponse.NodeResolverMapEntryList {
selectors = append(selectors, item.Selector)
}
return selectors, nil
}
func (s *nodeServer) getFetchBaseSVIDResponse(
baseSpiffeID string, baseSvid []byte, selectors []*common.Selector) (
*node.FetchBaseSVIDResponse, error) {
svids := make(map[string]*node.Svid)
svids[baseSpiffeID] = &node.Svid{
SvidCert: baseSvid,
Ttl: s.baseSVIDTtl,
}
regEntries, err := s.fetchRegistrationEntries(selectors, baseSpiffeID)
if err != nil {
return nil, err
}
svidUpdate := &node.SvidUpdate{
Svids: svids,
RegistrationEntries: regEntries,
}
return &node.FetchBaseSVIDResponse{SvidUpdate: svidUpdate}, nil
}
func (s *nodeServer) getCertFromCtx(ctx context.Context) (certificate *x509.Certificate, err error) {
ctxPeer, ok := peer.FromContext(ctx)
if !ok {
return nil, errors.New("It was not posible to extract peer from request")
}
tlsInfo, ok := ctxPeer.AuthInfo.(credentials.TLSInfo)
if !ok {
return nil, errors.New("It was not posible to extract AuthInfo from request")
}
if len(tlsInfo.State.PeerCertificates) == 0 {
return nil, errors.New("PeerCertificates was empty")
}
return tlsInfo.State.PeerCertificates[0], nil
}
func (s *nodeServer) signCSRs(
peerCert *x509.Certificate, csrs [][]byte, regEntries []*common.RegistrationEntry) (
svids map[string]*node.Svid, err error) {
uriNames, err := uri.GetURINamesFromCertificate(peerCert)
if err != nil {
s.l.Error(err)
return nil, errors.New("An SPIFFE ID is required for this request")
}
callerID := uriNames[0]
//convert registration entries into a map for easy lookup
regEntriesMap := make(map[string]*common.RegistrationEntry)
for _, entry := range regEntries {
regEntriesMap[entry.SpiffeId] = entry
}
dataStore := s.catalog.DataStores()[0]
svids = make(map[string]*node.Svid)
//iterate the CSRs and sign them
for _, csr := range csrs {
spiffeID, err := getSpiffeIDFromCSR(csr)
if err != nil {
return nil, err
}
baseSpiffeIDPrefix := fmt.Sprintf("%s/spire/agent", s.trustDomain.String())
if spiffeID == callerID && strings.HasPrefix(callerID, baseSpiffeIDPrefix) {
res, err := dataStore.FetchAttestedNodeEntry(
&datastore.FetchAttestedNodeEntryRequest{BaseSpiffeId: spiffeID},
)
if err != nil {
return nil, err
}
if res.AttestedNodeEntry.CertSerialNumber != peerCert.SerialNumber.String() {
err := errors.New("SVID serial number does not match")
return nil, err
}
svid, err := s.buildBaseSVID(csr)
if err != nil {
return nil, err
}
svids[spiffeID] = svid
s.updateAttestationEntry(svid.SvidCert, spiffeID)
if err != nil {
return nil, err
}
} else {
svid, err := s.buildSVID(spiffeID, regEntriesMap, csr)
if err != nil {
return nil, err
}
svids[spiffeID] = svid
}
}
return svids, nil
}
func (s *nodeServer) buildSVID(
spiffeID string, regEntries map[string]*common.RegistrationEntry, csr []byte) (
*node.Svid, error) {
serverCA := s.catalog.CAs()[0]
//TODO: Validate that other fields are not populated https://github.com/spiffe/spire/issues/161
//validate that is present in the registration entries, otherwise we shouldn't sign
entry, ok := regEntries[spiffeID]
if !ok {
err := errors.New("Not entitled to sign CSR")
return nil, err
}
signReq := &ca.SignCsrRequest{Csr: csr, Ttl: entry.Ttl}
signResponse, err := serverCA.SignCsr(signReq)
if err != nil {
return nil, err
}
return &node.Svid{SvidCert: signResponse.SignedCertificate, Ttl: entry.Ttl}, nil
}
func (s *nodeServer) buildBaseSVID(csr []byte) (*node.Svid, error) {
serverCA := s.catalog.CAs()[0]
signReq := &ca.SignCsrRequest{Csr: csr, Ttl: s.baseSVIDTtl}
signResponse, err := serverCA.SignCsr(signReq)
if err != nil {
return nil, err
}
return &node.Svid{
SvidCert: signResponse.SignedCertificate, Ttl: s.baseSVIDTtl,
}, nil
}
//TODO: put this into go-spiffe uri?
func getSpiffeIDFromCSR(csr []byte) (spiffeID string, err error) {
var parsedCSR *x509.CertificateRequest
if parsedCSR, err = x509.ParseCertificateRequest(csr); err != nil {
return spiffeID, err
}
var uris []string
uris, err = uri.GetURINamesFromExtensions(&parsedCSR.Extensions)
if len(uris) != 1 {
return spiffeID, errors.New("The CSR must have exactly one URI SAN")
}
spiffeID = uris[0]
return spiffeID, nil
}
| 1 | 8,917 | Shouldn't we consider the situation where it doesn't find a plugin? | spiffe-spire | go |
@@ -24,7 +24,12 @@ namespace oneapi::dal::csv {
template <>
class detail::v1::read_args_impl<table> : public base {
public:
- read_args_impl() {}
+ read_args_impl(preview::read_mode mode = preview::read_mode::table) : mode(mode) {
+ if (mode != preview::read_mode::table)
+ throw invalid_argument(dal::detail::error_messages::unsupported_read_mode());
+ }
+
+ preview::read_mode mode;
};
namespace v1 { | 1 | /*******************************************************************************
* Copyright 2020-2021 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include "oneapi/dal/io/csv/read_types.hpp"
#include "oneapi/dal/detail/common.hpp"
#include "oneapi/dal/detail/memory.hpp"
#include "oneapi/dal/table/common.hpp"
namespace oneapi::dal::csv {
template <>
class detail::v1::read_args_impl<table> : public base {
public:
read_args_impl() {}
};
namespace v1 {
read_args<table>::read_args() : impl_(new detail::read_args_impl<table>()) {}
} // namespace v1
} // namespace oneapi::dal::csv
| 1 | 31,781 | Do not understand why this needed for table. All new classes defined in the `preview` namespace. | oneapi-src-oneDAL | cpp |
@@ -17,6 +17,8 @@
package com.yahoo.athenz.common.server.notification;
public final class NotificationServiceConstants {
+ public static final String NOTIFICATION_PROP_SERVICE_FACTORY_CLASS = "athenz.zms.notification_service_factory_class";
+
public static final String NOTIFICATION_TYPE_MEMBERSHIP_APPROVAL = "MEMBERSHIP_APPROVAL";
public static final String NOTIFICATION_TYPE_MEMBERSHIP_APPROVAL_REMINDER = "MEMBERSHIP_APPROVAL_REMINDER";
public static final String NOTIFICATION_TYPE_PRINCIPAL_EXPIRY_REMINDER = "PRINCIPAL_EXPIRY_REMINDER"; | 1 | /*
* Copyright 2020 Verizon Media
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.yahoo.athenz.common.server.notification;
public final class NotificationServiceConstants {
public static final String NOTIFICATION_TYPE_MEMBERSHIP_APPROVAL = "MEMBERSHIP_APPROVAL";
public static final String NOTIFICATION_TYPE_MEMBERSHIP_APPROVAL_REMINDER = "MEMBERSHIP_APPROVAL_REMINDER";
public static final String NOTIFICATION_TYPE_PRINCIPAL_EXPIRY_REMINDER = "PRINCIPAL_EXPIRY_REMINDER";
public static final String NOTIFICATION_TYPE_DOMAIN_MEMBER_EXPIRY_REMINDER = "DOMAIN_MEMBER_EXPIRY_REMINDER";
public static final String NOTIFICATION_DETAILS_DOMAIN = "domain";
public static final String NOTIFICATION_DETAILS_ROLE = "role";
public static final String NOTIFICATION_DETAILS_MEMBER = "member";
public static final String NOTIFICATION_DETAILS_REASON = "reason";
public static final String NOTIFICATION_DETAILS_REQUESTER = "requester";
public static final String NOTIFICATION_DETAILS_EXPIRY_ROLES = "expiryRoles";
public static final String NOTIFICATION_DETAILS_EXPIRY_MEMBERS = "expiryMembers";
public static final String HTML_LOGO_CID_PLACEHOLDER = "<logo>";
public static final String CHARSET_UTF_8 = "UTF-8";
private NotificationServiceConstants() {
}
}
| 1 | 4,936 | Kept the same property value even though it is now in Common to avoid breaking existing clients. | AthenZ-athenz | java |
@@ -52,7 +52,7 @@ public class InclusiveMetricsEvaluator {
return visitors.get();
}
- InclusiveMetricsEvaluator(Schema schema, Expression unbound) {
+ public InclusiveMetricsEvaluator(Schema schema, Expression unbound) {
this(schema, unbound, true);
}
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.expressions;
import java.nio.ByteBuffer;
import java.util.Map;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.Schema;
import org.apache.iceberg.expressions.ExpressionVisitors.BoundExpressionVisitor;
import org.apache.iceberg.types.Conversions;
import org.apache.iceberg.types.Types.StructType;
import static org.apache.iceberg.expressions.Expressions.rewriteNot;
/**
* Evaluates an {@link Expression} on a {@link DataFile} to test whether rows in the file may match.
* <p>
* This evaluation is inclusive: it returns true if a file may match and false if it cannot match.
* <p>
* Files are passed to {@link #eval(DataFile)}, which returns true if the file may contain matching
* rows and false if the file cannot contain matching rows. Files may be skipped if and only if the
* return value of {@code eval} is false.
*/
public class InclusiveMetricsEvaluator {
private final Schema schema;
private final StructType struct;
private final Expression expr;
private final boolean caseSensitive;
private transient ThreadLocal<MetricsEvalVisitor> visitors = null;
private MetricsEvalVisitor visitor() {
if (visitors == null) {
this.visitors = ThreadLocal.withInitial(MetricsEvalVisitor::new);
}
return visitors.get();
}
InclusiveMetricsEvaluator(Schema schema, Expression unbound) {
this(schema, unbound, true);
}
public InclusiveMetricsEvaluator(Schema schema, Expression unbound, boolean caseSensitive) {
this.schema = schema;
this.struct = schema.asStruct();
this.caseSensitive = caseSensitive;
this.expr = Binder.bind(struct, rewriteNot(unbound), caseSensitive);
}
/**
* Test whether the file may contain records that match the expression.
*
* @param file a data file
* @return false if the file cannot contain rows that match the expression, true otherwise.
*/
public boolean eval(DataFile file) {
// TODO: detect the case where a column is missing from the file using file's max field id.
return visitor().eval(file);
}
private static final boolean ROWS_MIGHT_MATCH = true;
private static final boolean ROWS_CANNOT_MATCH = false;
private class MetricsEvalVisitor extends BoundExpressionVisitor<Boolean> {
private Map<Integer, Long> valueCounts = null;
private Map<Integer, Long> nullCounts = null;
private Map<Integer, ByteBuffer> lowerBounds = null;
private Map<Integer, ByteBuffer> upperBounds = null;
private boolean eval(DataFile file) {
if (file.recordCount() <= 0) {
return ROWS_CANNOT_MATCH;
}
this.valueCounts = file.valueCounts();
this.nullCounts = file.nullValueCounts();
this.lowerBounds = file.lowerBounds();
this.upperBounds = file.upperBounds();
return ExpressionVisitors.visit(expr, this);
}
@Override
public Boolean alwaysTrue() {
return ROWS_MIGHT_MATCH; // all rows match
}
@Override
public Boolean alwaysFalse() {
return ROWS_CANNOT_MATCH; // all rows fail
}
@Override
public Boolean not(Boolean result) {
return !result;
}
@Override
public Boolean and(Boolean leftResult, Boolean rightResult) {
return leftResult && rightResult;
}
@Override
public Boolean or(Boolean leftResult, Boolean rightResult) {
return leftResult || rightResult;
}
@Override
public <T> Boolean isNull(BoundReference<T> ref) {
// no need to check whether the field is required because binding evaluates that case
// if the column has no null values, the expression cannot match
Integer id = ref.fieldId();
if (nullCounts != null && nullCounts.containsKey(id) && nullCounts.get(id) == 0) {
return ROWS_CANNOT_MATCH;
}
return ROWS_MIGHT_MATCH;
}
@Override
public <T> Boolean notNull(BoundReference<T> ref) {
// no need to check whether the field is required because binding evaluates that case
// if the column has no non-null values, the expression cannot match
Integer id = ref.fieldId();
if (valueCounts != null && valueCounts.containsKey(id) &&
nullCounts != null && nullCounts.containsKey(id) &&
valueCounts.get(id) - nullCounts.get(id) == 0) {
return ROWS_CANNOT_MATCH;
}
return ROWS_MIGHT_MATCH;
}
@Override
public <T> Boolean lt(BoundReference<T> ref, Literal<T> lit) {
Integer id = ref.fieldId();
if (lowerBounds != null && lowerBounds.containsKey(id)) {
T lower = Conversions.fromByteBuffer(ref.type(), lowerBounds.get(id));
int cmp = lit.comparator().compare(lower, lit.value());
if (cmp >= 0) {
return ROWS_CANNOT_MATCH;
}
}
return ROWS_MIGHT_MATCH;
}
@Override
public <T> Boolean ltEq(BoundReference<T> ref, Literal<T> lit) {
Integer id = ref.fieldId();
if (lowerBounds != null && lowerBounds.containsKey(id)) {
T lower = Conversions.fromByteBuffer(ref.type(), lowerBounds.get(id));
int cmp = lit.comparator().compare(lower, lit.value());
if (cmp > 0) {
return ROWS_CANNOT_MATCH;
}
}
return ROWS_MIGHT_MATCH;
}
@Override
public <T> Boolean gt(BoundReference<T> ref, Literal<T> lit) {
Integer id = ref.fieldId();
if (upperBounds != null && upperBounds.containsKey(id)) {
T upper = Conversions.fromByteBuffer(ref.type(), upperBounds.get(id));
int cmp = lit.comparator().compare(upper, lit.value());
if (cmp <= 0) {
return ROWS_CANNOT_MATCH;
}
}
return ROWS_MIGHT_MATCH;
}
@Override
public <T> Boolean gtEq(BoundReference<T> ref, Literal<T> lit) {
Integer id = ref.fieldId();
if (upperBounds != null && upperBounds.containsKey(id)) {
T upper = Conversions.fromByteBuffer(ref.type(), upperBounds.get(id));
int cmp = lit.comparator().compare(upper, lit.value());
if (cmp < 0) {
return ROWS_CANNOT_MATCH;
}
}
return ROWS_MIGHT_MATCH;
}
@Override
public <T> Boolean eq(BoundReference<T> ref, Literal<T> lit) {
Integer id = ref.fieldId();
if (lowerBounds != null && lowerBounds.containsKey(id)) {
T lower = Conversions.fromByteBuffer(ref.type(), lowerBounds.get(id));
int cmp = lit.comparator().compare(lower, lit.value());
if (cmp > 0) {
return ROWS_CANNOT_MATCH;
}
}
if (upperBounds != null && upperBounds.containsKey(id)) {
T upper = Conversions.fromByteBuffer(ref.type(), upperBounds.get(id));
int cmp = lit.comparator().compare(upper, lit.value());
if (cmp < 0) {
return ROWS_CANNOT_MATCH;
}
}
return ROWS_MIGHT_MATCH;
}
@Override
public <T> Boolean notEq(BoundReference<T> ref, Literal<T> lit) {
// because the bounds are not necessarily a min or max value, this cannot be answered using
// them. notEq(col, X) with (X, Y) doesn't guarantee that X is a value in col.
return ROWS_MIGHT_MATCH;
}
@Override
public <T> Boolean in(BoundReference<T> ref, Literal<T> lit) {
return ROWS_MIGHT_MATCH;
}
@Override
public <T> Boolean notIn(BoundReference<T> ref, Literal<T> lit) {
return ROWS_MIGHT_MATCH;
}
}
}
| 1 | 14,912 | TODO: this is temporary until we figure out case sensitivity for metrics evaluators. | apache-iceberg | java |
@@ -503,9 +503,10 @@ func (node *Node) Start(ctx context.Context) error {
// TODO it is possible the syncer interface should be modified to
// make use of the additional context not used here (from addr + height).
// To keep things simple for now this info is not used.
- err := node.Syncer.HandleNewBlocks(context.Background(), cids)
+ cidSet := types.NewSortedCidSet(cids...)
+ err := node.Syncer.HandleNewTipset(context.Background(), cidSet)
if err != nil {
- log.Infof("error handling blocks: %s", types.NewSortedCidSet(cids...).String())
+ log.Infof("error handling blocks: %s", cidSet.String())
}
}
node.HelloSvc = hello.New(node.Host(), node.ChainReader.GenesisCid(), syncCallBack, node.ChainReader.Head, node.Repo.Config().Net, flags.Commit) | 1 | package node
import (
"context"
"encoding/json"
"fmt"
"os"
"sync"
"time"
"github.com/ipfs/go-bitswap"
bsnet "github.com/ipfs/go-bitswap/network"
bserv "github.com/ipfs/go-blockservice"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-hamt-ipld"
bstore "github.com/ipfs/go-ipfs-blockstore"
"github.com/ipfs/go-ipfs-exchange-interface"
"github.com/ipfs/go-ipfs-exchange-offline"
offroute "github.com/ipfs/go-ipfs-routing/offline"
logging "github.com/ipfs/go-log"
"github.com/ipfs/go-merkledag"
"github.com/libp2p/go-libp2p"
autonatsvc "github.com/libp2p/go-libp2p-autonat-svc"
circuit "github.com/libp2p/go-libp2p-circuit"
"github.com/libp2p/go-libp2p-host"
"github.com/libp2p/go-libp2p-kad-dht"
"github.com/libp2p/go-libp2p-kad-dht/opts"
p2pmetrics "github.com/libp2p/go-libp2p-metrics"
libp2ppeer "github.com/libp2p/go-libp2p-peer"
dhtprotocol "github.com/libp2p/go-libp2p-protocol"
libp2pps "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p-routing"
rhost "github.com/libp2p/go-libp2p/p2p/host/routed"
"github.com/libp2p/go-libp2p/p2p/protocol/ping"
ma "github.com/multiformats/go-multiaddr"
"github.com/pkg/errors"
"github.com/filecoin-project/go-filecoin/abi"
"github.com/filecoin-project/go-filecoin/actor/builtin"
"github.com/filecoin-project/go-filecoin/address"
"github.com/filecoin-project/go-filecoin/chain"
"github.com/filecoin-project/go-filecoin/config"
"github.com/filecoin-project/go-filecoin/consensus"
"github.com/filecoin-project/go-filecoin/core"
"github.com/filecoin-project/go-filecoin/flags"
"github.com/filecoin-project/go-filecoin/metrics"
"github.com/filecoin-project/go-filecoin/mining"
"github.com/filecoin-project/go-filecoin/net"
"github.com/filecoin-project/go-filecoin/net/pubsub"
"github.com/filecoin-project/go-filecoin/plumbing"
"github.com/filecoin-project/go-filecoin/plumbing/cfg"
"github.com/filecoin-project/go-filecoin/plumbing/dag"
"github.com/filecoin-project/go-filecoin/plumbing/msg"
"github.com/filecoin-project/go-filecoin/plumbing/mthdsig"
"github.com/filecoin-project/go-filecoin/plumbing/strgdls"
"github.com/filecoin-project/go-filecoin/porcelain"
"github.com/filecoin-project/go-filecoin/proofs"
"github.com/filecoin-project/go-filecoin/proofs/sectorbuilder"
"github.com/filecoin-project/go-filecoin/protocol/block"
"github.com/filecoin-project/go-filecoin/protocol/hello"
"github.com/filecoin-project/go-filecoin/protocol/retrieval"
"github.com/filecoin-project/go-filecoin/protocol/storage"
"github.com/filecoin-project/go-filecoin/repo"
"github.com/filecoin-project/go-filecoin/sampling"
"github.com/filecoin-project/go-filecoin/state"
"github.com/filecoin-project/go-filecoin/types"
"github.com/filecoin-project/go-filecoin/wallet"
)
const (
filecoinDHTProtocol dhtprotocol.ID = "/fil/kad/1.0.0"
)
var log = logging.Logger("node") // nolint: deadcode
var (
// ErrNoMinerAddress is returned when the node is not configured to have any miner addresses.
ErrNoMinerAddress = errors.New("no miner addresses configured")
)
type pubSubProcessorFunc func(ctx context.Context, msg pubsub.Message) error
// Node represents a full Filecoin node.
type Node struct {
host host.Host
PeerHost host.Host
Consensus consensus.Protocol
ChainReader chain.ReadStore
Syncer chain.Syncer
PowerTable consensus.PowerTableView
BlockMiningAPI *block.MiningAPI
PorcelainAPI *porcelain.API
RetrievalAPI *retrieval.API
StorageAPI *storage.API
// HeavyTipSetCh is a subscription to the heaviest tipset topic on the chain.
HeaviestTipSetCh chan interface{}
// HeavyTipSetHandled is a hook for tests because pubsub notifications
// arrive async. It's called after handling a new heaviest tipset.
// Remove this after replacing the tipset "pubsub" with a synchronous event bus:
// https://github.com/filecoin-project/go-filecoin/issues/2309
HeaviestTipSetHandled func()
// Incoming messages for block mining.
MsgPool *core.MessagePool
// Messages sent and not yet mined.
Outbox *core.MessageQueue
Wallet *wallet.Wallet
// Mining stuff.
AddNewlyMinedBlock newBlockFunc
blockTime time.Duration
cancelMining context.CancelFunc
GetAncestorsFunc mining.GetAncestors
GetStateTreeFunc mining.GetStateTree
GetWeightFunc mining.GetWeight
MiningWorker mining.Worker
MiningScheduler mining.Scheduler
mining struct {
sync.Mutex
isMining bool
}
miningCtx context.Context
miningDoneWg *sync.WaitGroup
// Storage Market Interfaces
StorageMiner *storage.Miner
// Retrieval Interfaces
RetrievalMiner *retrieval.Miner
// Network Fields
BlockSub pubsub.Subscription
MessageSub pubsub.Subscription
HelloSvc *hello.Handler
Bootstrapper *net.Bootstrapper
// Data Storage Fields
// Repo is the repo this node was created with
// it contains all persistent artifacts of the filecoin node
Repo repo.Repo
// SectorBuilder is used by the miner to fill and seal sectors.
sectorBuilder sectorbuilder.SectorBuilder
// Fetcher is the interface for fetching data from nodes.
Fetcher *net.Fetcher
// Exchange is the interface for fetching data from other nodes.
Exchange exchange.Interface
// Blockstore is the un-networked blocks interface
Blockstore bstore.Blockstore
// Blockservice is a higher level interface for fetching data
blockservice bserv.BlockService
// CborStore is a temporary interface for interacting with IPLD objects.
cborStore *hamt.CborIpldStore
// cancelSubscriptionsCtx is a handle to cancel the block and message subscriptions.
cancelSubscriptionsCtx context.CancelFunc
// OfflineMode, when true, disables libp2p
OfflineMode bool
// Router is a router from IPFS
Router routing.IpfsRouting
}
// Config is a helper to aid in the construction of a filecoin node.
type Config struct {
BlockTime time.Duration
Libp2pOpts []libp2p.Option
OfflineMode bool
Verifier proofs.Verifier
Rewarder consensus.BlockRewarder
Repo repo.Repo
IsRelay bool
}
// ConfigOpt is a configuration option for a filecoin node.
type ConfigOpt func(*Config) error
// OfflineMode enables or disables offline mode.
func OfflineMode(offlineMode bool) ConfigOpt {
return func(c *Config) error {
c.OfflineMode = offlineMode
return nil
}
}
// IsRelay configures node to act as a libp2p relay.
func IsRelay() ConfigOpt {
return func(c *Config) error {
c.IsRelay = true
return nil
}
}
// BlockTime sets the blockTime.
func BlockTime(blockTime time.Duration) ConfigOpt {
return func(c *Config) error {
c.BlockTime = blockTime
return nil
}
}
// Libp2pOptions returns a node config option that sets up the libp2p node
func Libp2pOptions(opts ...libp2p.Option) ConfigOpt {
return func(nc *Config) error {
// Quietly having your options overridden leads to hair loss
if len(nc.Libp2pOpts) > 0 {
panic("Libp2pOptions can only be called once")
}
nc.Libp2pOpts = opts
return nil
}
}
// VerifierConfigOption returns a function that sets the verifier to use in the node consensus
func VerifierConfigOption(verifier proofs.Verifier) ConfigOpt {
return func(c *Config) error {
c.Verifier = verifier
return nil
}
}
// RewarderConfigOption returns a function that sets the rewarder to use in the node consensus
func RewarderConfigOption(rewarder consensus.BlockRewarder) ConfigOpt {
return func(c *Config) error {
c.Rewarder = rewarder
return nil
}
}
// New creates a new node.
func New(ctx context.Context, opts ...ConfigOpt) (*Node, error) {
n := &Config{}
for _, o := range opts {
if err := o(n); err != nil {
return nil, err
}
}
return n.Build(ctx)
}
type blankValidator struct{}
func (blankValidator) Validate(_ string, _ []byte) error { return nil }
func (blankValidator) Select(_ string, _ [][]byte) (int, error) { return 0, nil }
// readGenesisCid is a helper function that queries the provided datastore for
// an entry with the genesisKey cid, returning if found.
func readGenesisCid(ds datastore.Datastore) (cid.Cid, error) {
bb, err := ds.Get(chain.GenesisKey)
if err != nil {
return cid.Undef, errors.Wrap(err, "failed to read genesisKey")
}
var c cid.Cid
err = json.Unmarshal(bb, &c)
if err != nil {
return cid.Undef, errors.Wrap(err, "failed to cast genesisCid")
}
return c, nil
}
// buildHost determines if we are publically dialable. If so use public
// Address, if not configure node to announce relay address.
func (nc *Config) buildHost(ctx context.Context, makeDHT func(host host.Host) (routing.IpfsRouting, error)) (host.Host, error) {
// Node must build a host acting as a libp2p relay. Additionally it
// runs the autoNAT service which allows other nodes to check for their
// own dialability by having this node attempt to dial them.
makeDHTRightType := func(h host.Host) (routing.PeerRouting, error) {
return makeDHT(h)
}
if nc.IsRelay {
cfg := nc.Repo.Config()
publicAddr, err := ma.NewMultiaddr(cfg.Swarm.PublicRelayAddress)
if err != nil {
return nil, err
}
publicAddrFactory := func(lc *libp2p.Config) error {
lc.AddrsFactory = func(addrs []ma.Multiaddr) []ma.Multiaddr {
if cfg.Swarm.PublicRelayAddress == "" {
return addrs
}
return append(addrs, publicAddr)
}
return nil
}
relayHost, err := libp2p.New(
ctx,
libp2p.EnableRelay(circuit.OptHop),
libp2p.EnableAutoRelay(),
libp2p.Routing(makeDHTRightType),
publicAddrFactory,
libp2p.ChainOptions(nc.Libp2pOpts...),
)
if err != nil {
return nil, err
}
// Set up autoNATService as a streamhandler on the host.
_, err = autonatsvc.NewAutoNATService(ctx, relayHost)
if err != nil {
return nil, err
}
return relayHost, nil
}
return libp2p.New(
ctx,
libp2p.EnableAutoRelay(),
libp2p.Routing(makeDHTRightType),
libp2p.ChainOptions(nc.Libp2pOpts...),
)
}
// Build instantiates a filecoin Node from the settings specified in the config.
func (nc *Config) Build(ctx context.Context) (*Node, error) {
if nc.Repo == nil {
nc.Repo = repo.NewInMemoryRepo()
}
bs := bstore.NewBlockstore(nc.Repo.Datastore())
validator := blankValidator{}
var peerHost host.Host
var router routing.IpfsRouting
bandwidthTracker := p2pmetrics.NewBandwidthCounter()
nc.Libp2pOpts = append(nc.Libp2pOpts, libp2p.BandwidthReporter(bandwidthTracker))
if !nc.OfflineMode {
makeDHT := func(h host.Host) (routing.IpfsRouting, error) {
r, err := dht.New(
ctx,
h,
dhtopts.Datastore(nc.Repo.Datastore()),
dhtopts.NamespacedValidator("v", validator),
dhtopts.Protocols(filecoinDHTProtocol),
)
if err != nil {
return nil, errors.Wrap(err, "failed to setup routing")
}
router = r
return r, err
}
var err error
peerHost, err = nc.buildHost(ctx, makeDHT)
if err != nil {
return nil, err
}
} else {
router = offroute.NewOfflineRouter(nc.Repo.Datastore(), validator)
peerHost = rhost.Wrap(noopLibP2PHost{}, router)
}
// set up pinger
pinger := ping.NewPingService(peerHost)
// set up bitswap
nwork := bsnet.NewFromIpfsHost(peerHost, router)
//nwork := bsnet.NewFromIpfsHost(innerHost, router)
bswap := bitswap.New(ctx, nwork, bs)
bservice := bserv.New(bs, bswap)
fetcher := net.NewFetcher(ctx, bservice)
cstOffline := hamt.CborIpldStore{Blocks: bserv.New(bs, offline.Exchange(bs))}
genCid, err := readGenesisCid(nc.Repo.Datastore())
if err != nil {
return nil, err
}
// set up chainstore
chainStore := chain.NewDefaultStore(nc.Repo.ChainDatastore(), &cstOffline, genCid)
powerTable := &consensus.MarketView{}
// set up processor
var processor consensus.Processor
if nc.Rewarder == nil {
processor = consensus.NewDefaultProcessor()
} else {
processor = consensus.NewConfiguredProcessor(consensus.NewDefaultMessageValidator(), nc.Rewarder)
}
// set up consensus
var nodeConsensus consensus.Protocol
if nc.Verifier == nil {
nodeConsensus = consensus.NewExpected(&cstOffline, bs, processor, powerTable, genCid, &proofs.RustVerifier{})
} else {
nodeConsensus = consensus.NewExpected(&cstOffline, bs, processor, powerTable, genCid, nc.Verifier)
}
// only the syncer gets the storage which is online connected
chainSyncer := chain.NewDefaultSyncer(&cstOffline, nodeConsensus, chainStore, fetcher)
msgPool := core.NewMessagePool(chainStore)
outbox := core.NewMessageQueue()
// Set up libp2p pubsub
fsub, err := libp2pps.NewFloodSub(ctx, peerHost)
if err != nil {
return nil, errors.Wrap(err, "failed to set up pubsub")
}
backend, err := wallet.NewDSBackend(nc.Repo.WalletDatastore())
if err != nil {
return nil, errors.Wrap(err, "failed to set up wallet backend")
}
fcWallet := wallet.New(backend)
PorcelainAPI := porcelain.New(plumbing.New(&plumbing.APIDeps{
Chain: chainStore,
Config: cfg.NewConfig(nc.Repo),
DAG: dag.NewDAG(merkledag.NewDAGService(bservice)),
Deals: strgdls.New(nc.Repo.DealsDatastore()),
MsgPool: msgPool,
MsgPreviewer: msg.NewPreviewer(fcWallet, chainStore, &cstOffline, bs),
MsgQueryer: msg.NewQueryer(nc.Repo, fcWallet, chainStore, &cstOffline, bs),
MsgSender: msg.NewSender(fcWallet, chainStore, chainStore, outbox, msgPool, consensus.NewOutboundMessageValidator(), fsub.Publish),
MsgWaiter: msg.NewWaiter(chainStore, bs, &cstOffline),
Network: net.New(peerHost, pubsub.NewPublisher(fsub), pubsub.NewSubscriber(fsub), net.NewRouter(router), bandwidthTracker, pinger),
Outbox: outbox,
SigGetter: mthdsig.NewGetter(chainStore),
Wallet: fcWallet,
}))
nd := &Node{
blockservice: bservice,
Blockstore: bs,
cborStore: &cstOffline,
Consensus: nodeConsensus,
ChainReader: chainStore,
Syncer: chainSyncer,
PowerTable: powerTable,
PorcelainAPI: PorcelainAPI,
Fetcher: fetcher,
Exchange: bswap,
host: peerHost,
MsgPool: msgPool,
Outbox: outbox,
OfflineMode: nc.OfflineMode,
PeerHost: peerHost,
Repo: nc.Repo,
Wallet: fcWallet,
blockTime: nc.BlockTime,
Router: router,
}
// set up mining worker funcs
nd.GetAncestorsFunc = nd.getAncestors
nd.GetStateTreeFunc = nd.getStateTree
nd.GetWeightFunc = nd.getWeight
// Bootstrapping network peers.
periodStr := nd.Repo.Config().Bootstrap.Period
period, err := time.ParseDuration(periodStr)
if err != nil {
return nil, errors.Wrapf(err, "couldn't parse bootstrap period %s", periodStr)
}
// Bootstrapper maintains connections to some subset of addresses
ba := nd.Repo.Config().Bootstrap.Addresses
bpi, err := net.PeerAddrsToPeerInfos(ba)
if err != nil {
return nil, errors.Wrapf(err, "couldn't parse bootstrap addresses [%s]", ba)
}
minPeerThreshold := nd.Repo.Config().Bootstrap.MinPeerThreshold
nd.Bootstrapper = net.NewBootstrapper(bpi, nd.Host(), nd.Host().Network(), nd.Router, minPeerThreshold, period)
return nd, nil
}
// Start boots up the node.
func (node *Node) Start(ctx context.Context) error {
if err := metrics.RegisterPrometheusEndpoint(node.Repo.Config().Metrics); err != nil {
return errors.Wrap(err, "failed to setup metrics")
}
var err error
if err = node.ChainReader.Load(ctx); err != nil {
return err
}
// Only set these up if there is a miner configured.
if _, err := node.miningAddress(); err == nil {
if err := node.setupMining(ctx); err != nil {
log.Errorf("setup mining failed: %v", err)
return err
}
}
// Start up 'hello' handshake service
syncCallBack := func(pid libp2ppeer.ID, cids []cid.Cid, height uint64) {
// TODO it is possible the syncer interface should be modified to
// make use of the additional context not used here (from addr + height).
// To keep things simple for now this info is not used.
err := node.Syncer.HandleNewBlocks(context.Background(), cids)
if err != nil {
log.Infof("error handling blocks: %s", types.NewSortedCidSet(cids...).String())
}
}
node.HelloSvc = hello.New(node.Host(), node.ChainReader.GenesisCid(), syncCallBack, node.ChainReader.Head, node.Repo.Config().Net, flags.Commit)
err = node.setupProtocols()
if err != nil {
return errors.Wrap(err, "failed to set up protocols:")
}
node.RetrievalMiner = retrieval.NewMiner(node)
// subscribe to block notifications
blkSub, err := node.PorcelainAPI.PubSubSubscribe(BlockTopic)
if err != nil {
return errors.Wrap(err, "failed to subscribe to blocks topic")
}
node.BlockSub = blkSub
// subscribe to message notifications
msgSub, err := node.PorcelainAPI.PubSubSubscribe(msg.Topic)
if err != nil {
return errors.Wrap(err, "failed to subscribe to message topic")
}
node.MessageSub = msgSub
cctx, cancel := context.WithCancel(context.Background())
node.cancelSubscriptionsCtx = cancel
go node.handleSubscription(cctx, node.processBlock, "processBlock", node.BlockSub, "BlockSub")
go node.handleSubscription(cctx, node.processMessage, "processMessage", node.MessageSub, "MessageSub")
outboxPolicy := core.NewMessageQueuePolicy(node.Outbox, node.ChainReadStore(), core.OutboxMaxAgeRounds)
node.HeaviestTipSetHandled = func() {}
node.HeaviestTipSetCh = node.ChainReader.HeadEvents().Sub(chain.NewHeadTopic)
go node.handleNewHeaviestTipSet(cctx, node.ChainReader.Head(), outboxPolicy)
if !node.OfflineMode {
node.Bootstrapper.Start(context.Background())
}
if err := node.setupHeartbeatServices(ctx); err != nil {
return errors.Wrap(err, "failed to start heartbeat services")
}
return nil
}
func (node *Node) setupHeartbeatServices(ctx context.Context) error {
mag := func() address.Address {
addr, err := node.miningAddress()
// the only error miningAddress() returns is ErrNoMinerAddress.
// if there is no configured miner address, simply send a zero
// address across the wire.
if err != nil {
return address.Undef
}
return addr
}
// start the primary heartbeat service
if len(node.Repo.Config().Heartbeat.BeatTarget) > 0 {
hbs := metrics.NewHeartbeatService(node.Host(), node.Repo.Config().Heartbeat, node.ChainReader.Head, metrics.WithMinerAddressGetter(mag))
go hbs.Start(ctx)
}
// check if we want to connect to an alert service. An alerting service is a heartbeat
// service that can trigger alerts based on the contents of heatbeats.
if alertTarget := os.Getenv("FIL_HEARTBEAT_ALERTS"); len(alertTarget) > 0 {
ahbs := metrics.NewHeartbeatService(node.Host(), &config.HeartbeatConfig{
BeatTarget: alertTarget,
BeatPeriod: "10s",
ReconnectPeriod: "10s",
Nickname: node.Repo.Config().Heartbeat.Nickname,
}, node.ChainReader.Head, metrics.WithMinerAddressGetter(mag))
go ahbs.Start(ctx)
}
return nil
}
func (node *Node) setupMining(ctx context.Context) error {
// configure the underlying sector store, defaulting to the non-test version
sectorStoreType := proofs.Live
if os.Getenv("FIL_USE_SMALL_SECTORS") == "true" {
sectorStoreType = proofs.Test
}
// initialize a sector builder
sectorBuilder, err := initSectorBuilderForNode(ctx, node, sectorStoreType)
if err != nil {
return errors.Wrap(err, "failed to initialize sector builder")
}
node.sectorBuilder = sectorBuilder
return nil
}
// setIsMining updates the mining flag under the mining mutex, pairing with
// the locked read in IsMining.
func (node *Node) setIsMining(isMining bool) {
	node.mining.Lock()
	defer node.mining.Unlock()
	node.mining.isMining = isMining
}
// handleNewMiningOutput consumes outputs from the mining worker until the
// output channel closes or the mining context is cancelled. Successful
// blocks are handed to AddNewlyMinedBlock on a fresh goroutine (tracked by
// miningDoneWg); a mining error stops mining entirely.
func (node *Node) handleNewMiningOutput(miningOutCh <-chan mining.Output) {
	// Balance the Add(1) done by the caller that launched this goroutine.
	defer func() {
		node.miningDoneWg.Done()
	}()
	for {
		select {
		case <-node.miningCtx.Done():
			return
		case output, ok := <-miningOutCh:
			if !ok {
				return
			}
			if output.Err != nil {
				log.Errorf("stopping mining. error: %s", output.Err.Error())
				node.StopMining(context.Background())
			} else {
				node.miningDoneWg.Add(1)
				go func() {
					// Re-check IsMining: mining may have been stopped between
					// receiving this output and the goroutine running.
					if node.IsMining() {
						node.AddNewlyMinedBlock(node.miningCtx, output.NewBlock)
					}
					node.miningDoneWg.Done()
				}()
			}
		}
	}
}
// handleNewHeaviestTipSet reads new heaviest tipsets published on
// node.HeaviestTipSetCh and, for each one, updates the outbound message
// queue and the message pool, notifies the storage miner (when present),
// and signals completion via node.HeaviestTipSetHandled. It returns when
// the channel is closed or ctx is done.
func (node *Node) handleNewHeaviestTipSet(ctx context.Context, head types.TipSet, outboxPolicy *core.MessageQueuePolicy) {
	for {
		select {
		case ts, ok := <-node.HeaviestTipSetCh:
			if !ok {
				return
			}
			newHead, ok := ts.(types.TipSet)
			if !ok {
				log.Error("non-tipset published on heaviest tipset channel")
				continue
			}
			if len(newHead) == 0 {
				log.Error("tipset of size 0 published on heaviest tipset channel. ignoring and waiting for a new heaviest tipset.")
				continue
			}

			// Keep derived state in sync with the new head; failures are
			// logged but head tracking continues.
			// Errorf with an explicit %s matches the error-logging style used
			// elsewhere in this file (plain Error would concatenate the
			// message and error without a separator).
			if err := outboxPolicy.OnNewHeadTipset(ctx, head, newHead); err != nil {
				log.Errorf("updating outbound message queue for new tipset: %s", err)
			}
			if err := node.MsgPool.UpdateMessagePool(ctx, node.ChainReadStore(), head, newHead); err != nil {
				log.Errorf("updating message pool for new tipset: %s", err)
			}
			head = newHead

			if node.StorageMiner != nil {
				node.StorageMiner.OnNewHeaviestTipSet(newHead)
			}
			node.HeaviestTipSetHandled()
		case <-ctx.Done():
			return
		}
	}
}
// cancelSubscriptions tears down the block and message pubsub subscriptions,
// cancelling their shared context first when either is active.
func (node *Node) cancelSubscriptions() {
	anyActive := node.BlockSub != nil || node.MessageSub != nil
	if anyActive {
		node.cancelSubscriptionsCtx()
	}

	if sub := node.BlockSub; sub != nil {
		sub.Cancel()
		node.BlockSub = nil
	}

	if sub := node.MessageSub; sub != nil {
		sub.Cancel()
		node.MessageSub = nil
	}
}
// Stop initiates the shutdown of the node.
// It unsubscribes from head events, stops mining and pubsub subscriptions,
// halts the chain reader, then closes the sector builder, host and repo.
// Close errors are printed rather than returned.
func (node *Node) Stop(ctx context.Context) {
	node.ChainReader.HeadEvents().Unsub(node.HeaviestTipSetCh)
	node.StopMining(ctx)
	node.cancelSubscriptions()
	node.ChainReader.Stop()
	if node.SectorBuilder() != nil {
		if err := node.SectorBuilder().Close(); err != nil {
			fmt.Printf("error closing sector builder: %s\n", err)
		}
		node.sectorBuilder = nil
	}
	if err := node.Host().Close(); err != nil {
		fmt.Printf("error closing host: %s\n", err)
	}
	if err := node.Repo.Close(); err != nil {
		fmt.Printf("error closing repo: %s\n", err)
	}
	node.Bootstrapper.Stop()
	fmt.Println("stopping filecoin :(")
}
// newBlockFunc is the signature of the callback invoked with each newly
// mined block (see Node.AddNewlyMinedBlock).
type newBlockFunc func(context.Context, *types.Block)

// addNewlyMinedBlock submits a block produced by the mining worker to the
// chain; failures are logged as warnings and otherwise ignored.
func (node *Node) addNewlyMinedBlock(ctx context.Context, b *types.Block) {
	log.Debugf("Got a newly mined block from the mining worker: %s", b)
	if err := node.AddNewBlock(ctx, b); err != nil {
		log.Warningf("error adding new mined block: %s. err: %s", b.Cid().String(), err.Error())
	}
}
// miningAddress returns the address of the mining actor mining on behalf of
// the node, or ErrNoMinerAddress when no miner address is configured.
func (node *Node) miningAddress() (address.Address, error) {
	configuredAddr := node.Repo.Config().Mining.MinerAddress
	if configuredAddr.Empty() {
		return address.Undef, ErrNoMinerAddress
	}
	return configuredAddr, nil
}
// MiningTimes returns the configured time it takes to mine a block, and also
// the mining delay duration, which is currently a fixed fraction of block time.
// Note this is mocked behavior, in production this time is determined by how
// long it takes to generate PoSTs.
func (node *Node) MiningTimes() (time.Duration, time.Duration) {
	blockTime := node.GetBlockTime()
	return blockTime, blockTime / mining.MineDelayConversionFactor
}
// GetBlockTime returns the current block time.
// TODO this should be surfaced somewhere in the plumbing API.
func (node *Node) GetBlockTime() time.Duration {
	return node.blockTime
}

// SetBlockTime sets the block time.
// NOTE(review): no locking is visible here — presumably only called during
// node configuration before concurrent readers exist; confirm.
func (node *Node) SetBlockTime(blockTime time.Duration) {
	node.blockTime = blockTime
}
// StartMining causes the node to start feeding blocks to the mining worker and initializes
// the SectorBuilder for the mining address.
//
// It lazily constructs the mining worker, scheduler, sector builder and
// storage miner, then spawns background goroutines that (a) convert sealing
// results into commitSector messages and (b) optionally auto-seal staged
// sectors on a configured interval. Returns an error if the node is already
// mining or any component fails to initialize.
func (node *Node) StartMining(ctx context.Context) error {
	if node.IsMining() {
		return errors.New("Node is already mining")
	}
	minerAddr, err := node.miningAddress()
	if err != nil {
		return errors.Wrap(err, "failed to get mining address")
	}

	// ensure we have a sector builder
	if node.SectorBuilder() == nil {
		if err := node.setupMining(ctx); err != nil {
			return err
		}
	}

	minerOwnerAddr, err := node.miningOwnerAddress(ctx, minerAddr)
	if err != nil {
		return errors.Wrapf(err, "failed to get mining owner address for miner %s", minerAddr)
	}

	_, mineDelay := node.MiningTimes()

	if node.MiningWorker == nil {
		if node.MiningWorker, err = node.CreateMiningWorker(ctx); err != nil {
			return err
		}
	}
	if node.MiningScheduler == nil {
		node.MiningScheduler = mining.NewScheduler(node.MiningWorker, mineDelay, node.ChainReader.Head)
	}

	// paranoid check
	if !node.MiningScheduler.IsStarted() {
		// NOTE(review): miningCtx is only (re)created here; the goroutines
		// below assume it is non-nil — confirm IsStarted() is false on the
		// first call after construction.
		node.miningCtx, node.cancelMining = context.WithCancel(context.Background())
		outCh, doneWg := node.MiningScheduler.Start(node.miningCtx)
		node.miningDoneWg = doneWg
		node.AddNewlyMinedBlock = node.addNewlyMinedBlock
		node.miningDoneWg.Add(1)
		go node.handleNewMiningOutput(outCh)
	}

	// initialize a storage miner
	storageMiner, err := initStorageMinerForNode(ctx, node)
	if err != nil {
		return errors.Wrap(err, "failed to initialize storage miner")
	}
	node.StorageMiner = storageMiner

	// loop, turning sealing-results into commitSector messages to be included
	// in the chain
	go func() {
		for {
			select {
			case result := <-node.SectorBuilder().SectorSealResults():
				if result.SealingErr != nil {
					log.Errorf("failed to seal sector with id %d: %s", result.SectorID, result.SealingErr.Error())
				} else if result.SealingResult != nil {
					// TODO: determine these algorithmically by simulating call and querying historical prices
					gasPrice := types.NewGasPrice(0)
					gasUnits := types.NewGasUnits(300)

					val := result.SealingResult
					// This call can fail due to, e.g. nonce collisions. Our miners existence depends on this.
					// We should deal with this, but MessageSendWithRetry is problematic.
					_, err := node.PorcelainAPI.MessageSend(
						node.miningCtx,
						minerOwnerAddr,
						minerAddr,
						nil,
						gasPrice,
						gasUnits,
						"commitSector",
						val.SectorID,
						val.CommD[:],
						val.CommR[:],
						val.CommRStar[:],
						val.Proof[:],
					)
					if err != nil {
						log.Errorf("failed to send commitSector message from %s to %s for sector with id %d: %s", minerOwnerAddr, minerAddr, val.SectorID, err)
						continue
					}

					node.StorageMiner.OnCommitmentAddedToChain(val, nil)
				}
			case <-node.miningCtx.Done():
				return
			}
		}
	}()

	// schedules sealing of staged piece-data
	if node.Repo.Config().Mining.AutoSealIntervalSeconds > 0 {
		go func() {
			for {
				select {
				case <-node.miningCtx.Done():
					return
				case <-time.After(time.Duration(node.Repo.Config().Mining.AutoSealIntervalSeconds) * time.Second):
					log.Info("auto-seal has been triggered")
					if err := node.SectorBuilder().SealAllStagedSectors(node.miningCtx); err != nil {
						log.Errorf("scheduler received error from node.SectorBuilder.SealAllStagedSectors (%s) - exiting", err.Error())
						return
					}
				}
			}
		}()
	} else {
		log.Debug("auto-seal is disabled")
	}

	node.setIsMining(true)
	return nil
}
// getLastUsedSectorID queries the miner actor at minerAddr for the last
// sector ID it has used, returning it as a uint64.
func (node *Node) getLastUsedSectorID(ctx context.Context, minerAddr address.Address) (uint64, error) {
	returnValues, signature, err := node.PorcelainAPI.MessageQuery(
		ctx,
		address.Address{},
		minerAddr,
		"getLastUsedSectorID",
	)
	if err != nil {
		return 0, errors.Wrap(err, "failed to call query method getLastUsedSectorID")
	}

	deserialized, err := abi.Deserialize(returnValues[0], signature.Return[0])
	if err != nil {
		return 0, errors.Wrap(err, "failed to convert returned ABI value")
	}

	sectorID, ok := deserialized.Val.(uint64)
	if !ok {
		return 0, errors.New("failed to convert returned ABI value to uint64")
	}

	return sectorID, nil
}
// initSectorBuilderForNode constructs a RustSectorBuilder for the node's
// configured miner, resuming from the miner actor's last used sector ID.
// Metadata and staged sectors currently share the repo's staging directory.
func initSectorBuilderForNode(ctx context.Context, node *Node, sectorStoreType proofs.SectorStoreType) (sectorbuilder.SectorBuilder, error) {
	minerAddr, err := node.miningAddress()
	if err != nil {
		return nil, errors.Wrap(err, "failed to get node's mining address")
	}
	lastUsedSectorID, err := node.getLastUsedSectorID(ctx, minerAddr)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to get last used sector id for miner w/address %s", minerAddr.String())
	}
	// TODO: Where should we store the RustSectorBuilder metadata? Currently, we
	// configure the RustSectorBuilder to store its metadata in the staging
	// directory.
	cfg := sectorbuilder.RustSectorBuilderConfig{
		BlockService:     node.blockservice,
		LastUsedSectorID: lastUsedSectorID,
		MetadataDir:      node.Repo.StagingDir(),
		MinerAddr:        minerAddr,
		SealedSectorDir:  node.Repo.SealedDir(),
		SectorStoreType:  sectorStoreType,
		StagedSectorDir:  node.Repo.StagingDir(),
	}
	sb, err := sectorbuilder.NewRustSectorBuilder(cfg)
	if err != nil {
		return nil, errors.Wrap(err, fmt.Sprintf("failed to initialize sector builder for miner %s", minerAddr.String()))
	}
	return sb, nil
}
// initStorageMinerForNode constructs a storage.Miner for this node's
// configured miner address and its on-chain owner address.
func initStorageMinerForNode(ctx context.Context, node *Node) (*storage.Miner, error) {
	minerAddr, err := node.miningAddress()
	if err != nil {
		return nil, errors.Wrap(err, "failed to get node's mining address")
	}

	ownerAddr, err := node.miningOwnerAddress(ctx, minerAddr)
	if err != nil {
		return nil, errors.Wrap(err, "no mining owner available, skipping storage miner setup")
	}

	miner, err := storage.NewMiner(minerAddr, ownerAddr, node, node.Repo.DealsDatastore(), node.PorcelainAPI)
	if err != nil {
		return nil, errors.Wrap(err, "failed to instantiate storage miner")
	}

	return miner, nil
}
// StopMining stops mining on new blocks.
// It clears the mining flag, cancels the mining context (stopping the
// scheduler and related goroutines), then blocks until outstanding mining
// work drains via miningDoneWg.
func (node *Node) StopMining(ctx context.Context) {
	node.setIsMining(false)
	if node.cancelMining != nil {
		node.cancelMining()
	}
	if node.miningDoneWg != nil {
		node.miningDoneWg.Wait()
	}
	// TODO: stop node.StorageMiner
}
// NewAddress creates a new account address on the default wallet backend.
// Errors from the wallet are returned unchanged.
func (node *Node) NewAddress() (address.Address, error) {
	return wallet.NewAddress(node.Wallet)
}
// miningOwnerAddress returns the owner of miningAddr.
// TODO: find a better home for this method
func (node *Node) miningOwnerAddress(ctx context.Context, miningAddr address.Address) (address.Address, error) {
	owner, err := node.PorcelainAPI.MinerGetOwnerAddress(ctx, miningAddr)
	if err != nil {
		return address.Undef, errors.Wrap(err, "failed to get miner owner address")
	}
	return owner, nil
}
// BlockHeight returns the current block height of the chain.
func (node *Node) BlockHeight() (*types.BlockHeight, error) {
	head := node.ChainReader.Head()
	if head == nil {
		return nil, errors.New("invalid nil head")
	}

	h, err := head.Height()
	if err != nil {
		return nil, err
	}

	return types.NewBlockHeight(h), nil
}
// handleSubscription pumps messages from pubsub subscription s into
// processor f until Next returns an error (e.g. the subscription was
// cancelled). fname and sname are used only for log attribution.
// Processor errors other than context.Canceled are logged; the loop
// continues regardless.
func (node *Node) handleSubscription(ctx context.Context, f pubSubProcessorFunc, fname string, s pubsub.Subscription, sname string) {
	for {
		pubSubMsg, err := s.Next(ctx)
		if err != nil {
			log.Errorf("%s.Next(): %s", sname, err)
			return
		}
		if err := f(ctx, pubSubMsg); err != nil {
			if err != context.Canceled {
				log.Errorf("%s(): %s", fname, err)
			}
		}
	}
}
// setupProtocols creates protocol clients and miners, then sets the node's APIs
// for each: block mining, retrieval, and storage. Always returns nil.
func (node *Node) setupProtocols() error {
	_, mineDelay := node.MiningTimes()
	blockMiningAPI := block.New(
		node.AddNewBlock,
		node.ChainReader,
		mineDelay,
		node.StartMining,
		node.StopMining,
		node.CreateMiningWorker)
	node.BlockMiningAPI = &blockMiningAPI
	// set up retrieval client and api
	retapi := retrieval.NewAPI(retrieval.NewClient(node.host, node.blockTime))
	node.RetrievalAPI = &retapi
	// set up storage client and api
	smc := storage.NewClient(node.blockTime, node.host, node.PorcelainAPI)
	smcAPI := storage.NewAPI(smc)
	node.StorageAPI = &smcAPI
	return nil
}
// CreateMiningWorker creates a mining.Worker for the node using the configured
// getStateTree, getWeight, and getAncestors functions for the node.
// It requires a configured miner address whose actor exposes both a key and
// an owner address.
func (node *Node) CreateMiningWorker(ctx context.Context) (mining.Worker, error) {
	processor := consensus.NewDefaultProcessor()
	minerAddr, err := node.miningAddress()
	if err != nil {
		return nil, errors.Wrap(err, "failed to get mining address")
	}
	minerPubKey, err := node.PorcelainAPI.MinerGetKey(ctx, minerAddr)
	if err != nil {
		return nil, errors.Wrap(err, "could not get key from miner actor")
	}
	minerOwnerAddr, err := node.miningOwnerAddress(ctx, minerAddr)
	if err != nil {
		log.Errorf("could not get owner address of miner actor")
		return nil, err
	}
	return mining.NewDefaultWorker(
		node.MsgPool, node.getStateTree, node.getWeight, node.getAncestors, processor, node.PowerTable,
		node.Blockstore, node.CborStore(), minerAddr, minerOwnerAddr, minerPubKey,
		node.Wallet, node.blockTime), nil
}
// getStateFromKey returns the state tree based on tipset fetched with provided key tsKey
func (node *Node) getStateFromKey(ctx context.Context, tsKey string) (state.Tree, error) {
	tipSetAndState, err := node.ChainReader.GetTipSetAndState(ctx, tsKey)
	if err != nil {
		return nil, err
	}

	return state.LoadStateTree(ctx, node.CborStore(), tipSetAndState.TipSetStateRoot, builtin.Actors)
}
// getStateTree is the default GetStateTree function for the mining worker.
// It loads the state tree for ts using the tipset's string key.
func (node *Node) getStateTree(ctx context.Context, ts types.TipSet) (state.Tree, error) {
	return node.getStateFromKey(ctx, ts.String())
}
// getWeight is the default GetWeight function for the mining worker.
func (node *Node) getWeight(ctx context.Context, ts types.TipSet) (uint64, error) {
	parent, err := ts.Parents()
	if err != nil {
		return uint64(0), err
	}

	// TODO handle genesis cid more gracefully
	if parent.Len() == 0 {
		return node.Consensus.Weight(ctx, ts, nil)
	}

	parentState, err := node.getStateFromKey(ctx, parent.String())
	if err != nil {
		return uint64(0), err
	}

	return node.Consensus.Weight(ctx, ts, parentState)
}
// getAncestors is the default GetAncestors function for the mining worker.
// It fetches the recent ancestor tipsets required by consensus and sampling.
func (node *Node) getAncestors(ctx context.Context, ts types.TipSet, newBlockHeight *types.BlockHeight) ([]types.TipSet, error) {
	return chain.GetRecentAncestors(ctx, ts, node.ChainReader, newBlockHeight, consensus.AncestorRoundsNeeded, sampling.LookbackParameter)
}
// -- Accessors

// Host returns the nodes host.
func (node *Node) Host() host.Host {
	return node.host
}

// SectorBuilder returns the nodes sectorBuilder.
func (node *Node) SectorBuilder() sectorbuilder.SectorBuilder {
	return node.sectorBuilder
}

// BlockService returns the nodes blockservice.
func (node *Node) BlockService() bserv.BlockService {
	return node.blockservice
}

// CborStore returns the nodes cborStore.
func (node *Node) CborStore() *hamt.CborIpldStore {
	return node.cborStore
}

// ChainReadStore returns the node's chain store.
func (node *Node) ChainReadStore() chain.ReadStore {
	return node.ChainReader
}

// IsMining returns a boolean indicating whether the node is mining blocks.
// The read is guarded by the mining mutex, pairing with setIsMining.
func (node *Node) IsMining() bool {
	node.mining.Lock()
	defer node.mining.Unlock()
	return node.mining.isMining
}
| 1 | 18,293 | can we just kill this TODO? | filecoin-project-venus | go |
@@ -33,7 +33,7 @@ func TestDAGGet(t *testing.T) {
dag := NewDAG(dserv)
_, err := dag.GetNode(ctx, "awful")
- assert.EqualError(t, err, "invalid 'ipfs ref' path")
+ assert.EqualError(t, err, "invalid path \"awful\": selected encoding not supported")
})
t.Run("ILPD node not found results in error", func(t *testing.T) { | 1 | package dag
import (
"context"
"testing"
"time"
"github.com/filecoin-project/go-address"
"github.com/ipfs/go-blockservice"
"github.com/ipfs/go-datastore"
blockstore "github.com/ipfs/go-ipfs-blockstore"
offline "github.com/ipfs/go-ipfs-exchange-offline"
format "github.com/ipfs/go-ipld-format"
"github.com/ipfs/go-merkledag"
"github.com/stretchr/testify/assert"
"github.com/filecoin-project/go-filecoin/internal/pkg/chain"
tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags"
"github.com/filecoin-project/go-filecoin/internal/pkg/types"
)
// TestDAGGet exercises DAG.GetNode against an in-memory, offline blockservice.
func TestDAGGet(t *testing.T) {
	tf.UnitTest(t)

	t.Run("invalid ref", func(t *testing.T) {
		ctx := context.Background()

		mapDS := datastore.NewMapDatastore()
		store := blockstore.NewBlockstore(mapDS)
		exchange := offline.Exchange(store)
		blockServ := blockservice.New(store, exchange)
		dagServ := merkledag.NewDAGService(blockServ)
		subject := NewDAG(dagServ)

		_, err := subject.GetNode(ctx, "awful")
		assert.EqualError(t, err, "invalid 'ipfs ref' path")
	})

	t.Run("ILPD node not found results in error", func(t *testing.T) {
		ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*200)
		defer cancel()

		mapDS := datastore.NewMapDatastore()
		store := blockstore.NewBlockstore(mapDS)
		exchange := offline.Exchange(store)
		blockServ := blockservice.New(store, exchange)
		dagServ := merkledag.NewDAGService(blockServ)
		subject := NewDAG(dagServ)

		someCid := types.CidFromString(t, "somecid")

		_, err := subject.GetNode(ctx, someCid.String())
		assert.EqualError(t, err, "merkledag: not found")
	})

	t.Run("matching IPLD node is emitted", func(t *testing.T) {
		ctx := context.Background()

		mapDS := datastore.NewMapDatastore()
		store := blockstore.NewBlockstore(mapDS)
		exchange := offline.Exchange(store)
		blockServ := blockservice.New(store, exchange)
		dagServ := merkledag.NewDAGService(blockServ)
		subject := NewDAG(dagServ)

		ipldnode := chain.NewBuilder(t, address.Undef).NewGenesis().At(0).ToNode()

		// put into out blockservice
		assert.NoError(t, blockServ.AddBlock(ipldnode))

		res, err := subject.GetNode(ctx, ipldnode.Cid().String())
		assert.NoError(t, err)

		nodeBack, ok := res.(format.Node)
		assert.True(t, ok)
		assert.Equal(t, ipldnode.Cid().String(), nodeBack.Cid().String())
	})
}
| 1 | 23,573 | This appears to be a change in error handling due to a dag upgrade. | filecoin-project-venus | go |
@@ -246,7 +246,9 @@ module Beaker
end
it 'calls beaker-hostgenerator to get hosts information' do
- parser.instance_variable_set( :@options, {} )
+ parser.instance_variable_set( :@options, {
+ :hosts_file => 'notafile.yml'
+ } )
allow( Beaker::Options::HostsFileParser ).to receive(
:parse_hosts_file
).and_raise( Errno::ENOENT ) | 1 | require "spec_helper"
module Beaker
module Options
describe Parser do
let(:parser) { Parser.new }
let(:opts_path) { File.join(File.expand_path(File.dirname(__FILE__)), "data", "opts.txt") }
let(:hosts_path) { File.join(File.expand_path(File.dirname(__FILE__)), "data", "hosts.cfg") }
# Smoke test: rendering usage/help output should never raise.
it "supports usage function" do
  expect { parser.usage }.to_not raise_error
end
describe 'parse_git_repos' do
  it "transforms arguments of <PROJECT_NAME>/<REF> to <GIT_BASE_URL>/<lowercased_project_name>#<REF>" do
    expect(parser.parse_git_repos(["PUPPET/3.1"])).to be === ["#{parser.repo}/puppet.git#3.1"]
  end

  it "recognizes PROJECT_NAMEs of PUPPET, FACTER, HIERA, and HIERA-PUPPET" do
    # [expected project, expected ref, raw CLI input]
    [['puppet', 'my_branch', 'PUPPET/my_branch'],
     ['facter', 'my_branch', 'FACTER/my_branch'],
     ['hiera', 'my_branch', 'HIERA/my_branch'],
     ['hiera-puppet', 'my_branch', 'HIERA-PUPPET/my_branch']].each do |project, ref, input|
      expect(parser.parse_git_repos([input])).to be === ["#{parser.repo}/#{project}.git##{ref}"]
    end
  end
end
describe 'split_arg' do
  # Parser#split_arg accepts either a comma-separated string or an
  # existing Array and always yields an Array.
  it "can split comma separated list into an array" do
    expect(parser.split_arg("file1,file2,file3")).to be === ["file1", "file2", "file3"]
  end

  it "can use an existing Array as an acceptable argument" do
    existing = ["file1", "file2", "file3"]
    expect(parser.split_arg(existing)).to be === ["file1", "file2", "file3"]
  end

  it "can generate an array from a single value" do
    expect(parser.split_arg("i'mjustastring")).to be === ["i'mjustastring"]
  end
end
# Exercises Parser#file_list path discovery: only *.rb files are collected
# (recursively), and empty inputs or empty results raise ArgumentError.
context 'testing path traversing' do
  let(:test_dir) { 'tmp/tests' }
  let(:rb_test) { File.expand_path(test_dir + '/my_ruby_file.rb') }
  let(:pl_test) { File.expand_path(test_dir + '/my_perl_file.pl') }
  let(:sh_test) { File.expand_path(test_dir + '/my_shell_file.sh') }
  let(:rb_other) { File.expand_path(test_dir + '/other/my_other_ruby_file.rb') }

  it 'only collects ruby files as test files' do
    files = [rb_test, pl_test, sh_test, rb_other]
    create_files(files)
    expect(parser.file_list([File.expand_path(test_dir)])).to be === [rb_test, rb_other]
  end

  it 'raises an error when no ruby files are found' do
    files = [pl_test, sh_test]
    create_files(files)
    expect { parser.file_list([File.expand_path(test_dir)]) }.to raise_error(ArgumentError)
  end

  it 'raises an error when no paths are specified for searching' do
    @files = ''
    expect { parser.file_list('') }.to raise_error(ArgumentError)
  end
end
# Verifies that split_arg + file_list preserves the caller's ordering:
# explicitly listed files keep their position, while each directory's
# contents are expanded in sorted order.
context 'combining split_arg and file_list maintain test file ordering' do
  let(:test_dir) { 'tmp/tests' }
  let(:other_test_dir) { 'tmp/tests2' }

  before :each do
    # Filenames are shuffled when created so any correct ordering in the
    # results must come from file_list's own sorting.
    files = [
      '00_EnvSetup.rb', '035_StopFirewall.rb', '05_HieraSetup.rb',
      '01_TestSetup.rb', '03_PuppetMasterSanity.rb',
      '06_InstallModules.rb', '02_PuppetUserAndGroup.rb',
      '04_ValidateSignCert.rb', '07_InstallCACerts.rb']
    @lone_file = '08_foss.rb'
    @fileset1 = files.shuffle.map { |file| test_dir + '/' + file }
    @fileset2 = files.shuffle.map { |file| other_test_dir + '/' + file }
    @sorted_expanded_fileset1 = @fileset1.map { |f| File.expand_path(f) }.sort
    @sorted_expanded_fileset2 = @fileset2.map { |f| File.expand_path(f) }.sort
    create_files(@fileset1)
    create_files(@fileset2)
    create_files([@lone_file])
  end

  it "when provided a file followed by dir, runs the file first" do
    arg = "#{@lone_file},#{test_dir}"
    output = parser.file_list(parser.split_arg(arg))
    expect(output).to be === [@lone_file, @sorted_expanded_fileset1].flatten
  end

  it "when provided a dir followed by a file, runs the file last" do
    arg = "#{test_dir},#{@lone_file}"
    output = parser.file_list(parser.split_arg(arg))
    expect(output).to be === [@sorted_expanded_fileset1, @lone_file].flatten
  end

  it "correctly orders files in a directory" do
    arg = "#{test_dir}"
    output = parser.file_list(parser.split_arg(arg))
    expect(output).to be === @sorted_expanded_fileset1
  end

  it "when provided two directories orders each directory separately" do
    arg = "#{test_dir}/,#{other_test_dir}/"
    output = parser.file_list(parser.split_arg(arg))
    expect(output).to be === @sorted_expanded_fileset1 + @sorted_expanded_fileset2
  end
end
describe '#parse_args' do
  before { FakeFS.deactivate! }

  it 'pulls the args into key called :command_line' do
    my_args = ['--log-level', 'debug', '-h', hosts_path]
    expect(parser.parse_args(my_args)[:command_line]).to include(my_args.join(' '))
  end

  # Verifies the documented option precedence, from lowest (presets) to
  # highest (environment variables), by stubbing out every option source.
  describe 'does prioritization correctly' do
    let(:env) { @env || {:level => 'highest'} }
    let(:argv) { @argv || {:level => 'second'} }
    let(:host_file) { @host_file || {:level => 'third'} }
    let(:opt_file) { @opt_file || {:level => 'fourth'} }
    let(:presets) { {:level => 'lowest'} }

    before :each do
      expect(parser).to receive(:normalize_args).and_return(true)
    end

    # Replaces each option source with canned doubles so only precedence
    # (not parsing) is under test.
    def mock_out_parsing
      presets_obj = double()
      allow(presets_obj).to receive(:presets).and_return(presets)
      allow(presets_obj).to receive(:env_vars).and_return(env)
      parser.instance_variable_set(:@presets, presets_obj)
      command_line_parser_obj = double()
      allow(command_line_parser_obj).to receive(:parse).and_return(argv)
      parser.instance_variable_set(:@command_line_parser, command_line_parser_obj)
      allow(OptionsFileParser).to receive(:parse_options_file).and_return(opt_file)
      allow(parser).to receive(:parse_hosts_options).and_return(host_file)
    end

    it 'presets have the lowest priority' do
      @env = @argv = @host_file = @opt_file = {}
      mock_out_parsing
      opts = parser.parse_args([])
      expect(opts[:level]).to be == 'lowest'
    end

    it 'options file has fourth priority' do
      @env = @argv = @host_file = {}
      mock_out_parsing
      opts = parser.parse_args([])
      expect(opts[:level]).to be == 'fourth'
    end

    it 'host file CONFIG section has third priority' do
      @env = @argv = {}
      mock_out_parsing
      opts = parser.parse_args([])
      expect(opts[:level]).to be == 'third'
    end

    it 'command line arguments have second priority' do
      @env = {}
      mock_out_parsing
      opts = parser.parse_args([])
      expect(opts[:level]).to be == 'second'
    end

    it 'env vars have highest priority' do
      mock_out_parsing
      opts = parser.parse_args([])
      expect(opts[:level]).to be == 'highest'
    end
  end

  it "can correctly combine arguments from different sources" do
    build_url = 'http://my.build.url/'
    type = 'git'
    log_level = 'debug'
    old_build_url = ENV["BUILD_URL"]
    ENV["BUILD_URL"] = build_url
    begin
      args = ["-h", hosts_path, "--log-level", log_level, "--type", type, "--install", "PUPPET/1.0,HIERA/hello"]
      output = parser.parse_args(args)
      expect(output[:hosts_file]).to be == hosts_path
      expect(output[:jenkins_build_url]).to be == build_url
      expect(output[:install]).to include('git://github.com/puppetlabs/hiera.git#hello')
    ensure
      # Restore BUILD_URL even when an expectation above fails so later
      # examples never observe a leaked environment variable.
      ENV["BUILD_URL"] = old_build_url
    end
  end

  it "ensures that fail-mode is one of fast/slow" do
    args = ["-h", hosts_path, "--log-level", "debug", "--fail-mode", "nope"]
    expect { parser.parse_args(args) }.to raise_error(ArgumentError)
  end
end
describe '#parse_hosts_options' do
  # When the given hosts file exists, parsing is delegated to
  # HostsFileParser and its result is returned unchanged.
  context 'Hosts file exists' do
    before :each do
      allow(File).to receive(:exists?).and_return(true)
    end

    it 'returns the parser\'s output' do
      parser.instance_variable_set( :@options, {} )
      test_value = 'blaqwetjijl,emikfuj1235'
      allow( Beaker::Options::HostsFileParser ).to receive(
        :parse_hosts_file
      ).and_return( test_value )
      val1, _ = parser.parse_hosts_options
      expect( val1 ).to be === test_value
    end
  end

  # When the hosts file is missing (parse_hosts_file raises ENOENT),
  # beaker-hostgenerator is invoked to synthesize hosts information.
  context 'Hosts file does not exist' do
    require 'beaker-hostgenerator'
    before :each do
      allow(File).to receive(:exists?).and_return(false)
    end

    it 'calls beaker-hostgenerator to get hosts information' do
      parser.instance_variable_set( :@options, {} )
      allow( Beaker::Options::HostsFileParser ).to receive(
        :parse_hosts_file
      ).and_raise( Errno::ENOENT )
      mock_beaker_hostgenerator_cli = Object.new
      cli_execute_return = 'job150865'
      expect( mock_beaker_hostgenerator_cli ).to receive(
        :execute
      ).and_return( cli_execute_return )
      expect( BeakerHostGenerator::CLI ).to receive(
        :new
      ).and_return( mock_beaker_hostgenerator_cli )
      allow( Beaker::Options::HostsFileParser ).to receive(
        :parse_hosts_string
      ).with( cli_execute_return )
      parser.parse_hosts_options
    end

    it 'sets the :hosts_file_generated flag to signal others when needed' do
      options_test = {}
      parser.instance_variable_set( :@options, options_test )
      allow( Beaker::Options::HostsFileParser ).to receive(
        :parse_hosts_file
      ).and_raise( Errno::ENOENT )
      mock_beaker_hostgenerator_cli = Object.new
      allow( mock_beaker_hostgenerator_cli ).to receive( :execute )
      allow( BeakerHostGenerator::CLI ).to receive(
        :new
      ).and_return( mock_beaker_hostgenerator_cli )
      allow( Beaker::Options::HostsFileParser ).to receive( :parse_hosts_string )
      parser.parse_hosts_options
      expect( options_test[:hosts_file_generated] ).to be true
    end

    # Generator failures must be reported on stdout/stderr (in that order)
    # and then re-raised without attempting to parse a hosts string.
    it 'beaker-hostgenerator failures trigger nice prints & a rethrow' do
      options_test = {}
      parser.instance_variable_set( :@options, options_test )
      allow( Beaker::Options::HostsFileParser ).to receive(
        :parse_hosts_file
      ).and_raise( Errno::ENOENT )
      mock_beaker_hostgenerator_cli = Object.new
      expect( BeakerHostGenerator::CLI ).to receive(
        :new
      ).and_return( mock_beaker_hostgenerator_cli )
      expect( mock_beaker_hostgenerator_cli ).to receive(
        :execute
      ).and_raise( BeakerHostGenerator::Exceptions::InvalidNodeSpecError )
      expect( Beaker::Options::HostsFileParser ).not_to receive( :parse_hosts_string )
      expect( $stdout ).to receive( :puts ).with(
        /does not exist/
      ).ordered
      expect( $stderr ).to receive( :puts ).with(
        /Exiting with an Error/
      ).ordered
      expect {
        parser.parse_hosts_options
      }.to raise_error( BeakerHostGenerator::Exceptions::InvalidNodeSpecError )
    end
  end
end
# Parser#set_default_host! must leave exactly one host carrying the
# 'default' role, preferring the master when none is already marked.
context "set_default_host!" do
  let(:roles) { @roles || [["master", "agent", "database"], ["agent"]] }
  let(:node1) { {:node1 => {:roles => roles[0]}} }
  let(:node2) { {:node2 => {:roles => roles[1]}} }
  let(:hosts) { node1.merge(node2) }

  it "does nothing if the default host is already set" do
    @roles = [["master"], ["agent", "default"]]
    parser.set_default_host!(hosts)
    expect(hosts[:node1][:roles].include?('default')).to be === false
    expect(hosts[:node2][:roles].include?('default')).to be === true
  end

  it "makes the master default" do
    @roles = [["master"], ["agent"]]
    parser.set_default_host!(hosts)
    expect(hosts[:node1][:roles].include?('default')).to be === true
    expect(hosts[:node2][:roles].include?('default')).to be === false
  end

  it "makes a single node default" do
    @roles = [["master", "database", "dashboard", "agent"]]
    parser.set_default_host!(node1)
    expect(hosts[:node1][:roles].include?('default')).to be === true
  end

  it "makes a single non-master node default" do
    @roles = [["database", "dashboard", "agent"]]
    parser.set_default_host!(node1)
    expect(hosts[:node1][:roles].include?('default')).to be === true
  end

  it "raises an error if two nodes are defined as default" do
    @roles = [["master", "default"], ["default"]]
    expect { parser.set_default_host!(hosts) }.to raise_error(ArgumentError)
  end
end
describe "normalize_args" do
  # Baseline two-host options hash; individual examples tweak platform,
  # roles, or ssh settings before normalization.
  let(:hosts) do
    Beaker::Options::OptionsHash.new.merge({
      'HOSTS' => {
        :master => {
          :roles => ["master", "agent", "arbitrary_role"],
          :platform => 'el-7-x86_64',
          :user => 'root',
        },
        :agent => {
          :roles => ["agent", "default", "other_abitrary_role"],
          :platform => 'el-7-x86_64',
          :user => 'root',
        },
      },
      'fail_mode' => 'slow',
      'preserve_hosts' => 'always',
      'host_tags' => {}
    })
  end

  # Writes a hosts YAML file whose hosts all use +platform+ and returns
  # the file name, suitable for passing via --hosts.
  def fake_hosts_file_for_platform(hosts, platform)
    hosts['HOSTS'].values.each { |h| h[:platform] = platform }
    filename = "hosts_file_#{platform}"
    File.open(filename, "w") do |file|
      YAML.dump(hosts, file)
    end
    filename
  end

  shared_examples_for(:a_platform_supporting_only_agents) do |platform, _type|
    it "restricts #{platform} hosts to agent" do
      args = []
      args << '--hosts' << fake_hosts_file_for_platform(hosts, platform)
      expect { parser.parse_args(args) }.to raise_error(ArgumentError, /#{platform}.*may not have roles: master, database, dashboard/)
    end
  end

  context "restricts agents" do
    it_should_behave_like(:a_platform_supporting_only_agents, 'windows-version-arch')
    it_should_behave_like(:a_platform_supporting_only_agents, 'el-4-arch')
  end

  # The effective ssh user comes from host[:ssh][:user] when present;
  # otherwise the host's existing :user is kept.
  context "ssh user" do
    it 'uses the ssh[:user] if it is provided' do
      hosts['HOSTS'][:master][:ssh] = {:user => 'hello'}
      parser.instance_variable_set(:@options, hosts)
      parser.normalize_args
      expect(hosts['HOSTS'][:master][:user]).to be == 'hello'
    end

    it 'uses default user if there is an ssh hash, but no ssh[:user]' do
      hosts['HOSTS'][:master][:ssh] = {:hello => 'hello'}
      parser.instance_variable_set(:@options, hosts)
      parser.normalize_args
      expect(hosts['HOSTS'][:master][:user]).to be == 'root'
    end

    it 'uses default user if no ssh hash' do
      parser.instance_variable_set(:@options, hosts)
      parser.normalize_args
      expect(hosts['HOSTS'][:master][:user]).to be == 'root'
    end
  end
end
# normalize_tags! splits the comma-separated include/exclude tag strings
# held in @options into lowercased arrays, in place.
describe '#normalize_tags!' do
  let (:tag_includes) { @tag_includes || [] }
  let (:tag_excludes) { @tag_excludes || [] }
  let (:options) {
    opts = Beaker::Options::OptionsHash.new
    opts[:tag_includes] = tag_includes
    opts[:tag_excludes] = tag_excludes
    opts
  }

  it 'does not error if no tags overlap' do
    @tag_includes = 'can,tommies,potatoes,plant'
    @tag_excludes = 'joey,long_running,pants'
    parser.instance_variable_set(:@options, options)
    expect { parser.normalize_tags! }.not_to raise_error
  end

  it 'splits the basic case correctly' do
    @tag_includes = 'can,tommies,potatoes,plant'
    @tag_excludes = 'joey,long_running,pants'
    parser.instance_variable_set(:@options, options)
    parser.normalize_tags!
    expect(options[:tag_includes]).to be === ['can', 'tommies', 'potatoes', 'plant']
    expect(options[:tag_excludes]).to be === ['joey', 'long_running', 'pants']
  end

  it 'returns empty arrays for empty strings' do
    @tag_includes = ''
    @tag_excludes = ''
    parser.instance_variable_set(:@options, options)
    parser.normalize_tags!
    expect(options[:tag_includes]).to be === []
    expect(options[:tag_excludes]).to be === []
  end

  it 'lowercases all tags correctly for later use' do
    @tag_includes = 'jeRRy_And_tOM,PARka'
    @tag_excludes = 'lEet_spEAK,pOland'
    parser.instance_variable_set(:@options, options)
    parser.normalize_tags!
    expect(options[:tag_includes]).to be === ['jerry_and_tom', 'parka']
    expect(options[:tag_excludes]).to be === ['leet_speak', 'poland']
  end
end
describe '#resolve_symlinks' do
  let (:options) { Beaker::Options::OptionsHash.new }

  it 'calls File.realpath if hosts_file is set' do
    options[:hosts_file] = opts_path
    parser.instance_variable_set(:@options, options)
    parser.resolve_symlinks!
    expect(parser.instance_variable_get(:@options)[:hosts_file]).to be === opts_path
  end

  # A nil :hosts_file must be tolerated (no realpath call, no exception).
  it 'does not throw an error if hosts_file is not set' do
    options[:hosts_file] = nil
    parser.instance_variable_set(:@options, options)
    expect { parser.resolve_symlinks! }.to_not raise_error
  end
end
describe '#get_hypervisors' do
  # get_hypervisors de-duplicates the hypervisor values found across hosts,
  # preserving first-seen order.
  it 'returns a unique list' do
    duplicated_hosts = {
      'vm1' => {hypervisor: 'hi'},
      'vm2' => {hypervisor: 'hi'},
      'vm3' => {hypervisor: 'bye'},
    }
    single_host = {'vm1' => {hypervisor: 'hi'}}
    expect(parser.get_hypervisors(duplicated_hosts)).to eq(['hi', 'bye'])
    expect(parser.get_hypervisors(single_host)).to eq(['hi'])
  end
end
describe '#get_roles' do
  # get_roles collects each host's :roles array, in host order,
  # without flattening or merging them.
  it 'returns a unique list' do
    multi_role_hosts = {
      'vm1' => {roles: ['master']},
      'vm2' => {roles: ['database', 'dashboard']},
      'vm3' => {roles: ['bye']},
    }
    one_role_host = {'vm1' => {roles: ['hi']}}
    expect(parser.get_roles(multi_role_hosts)).to eq([['master'], ['database', 'dashboard'], ['bye']])
    expect(parser.get_roles(one_role_host)).to eq([['hi']])
  end
end
# check_hypervisor_config validates that the config file required by a given
# hypervisor exists: ec2_yaml for blimpy, dot_fog for aix/solaris/vcloud.
describe '#check_hypervisor_config' do
  let (:options) { Beaker::Options::OptionsHash.new }
  let (:invalid_file) { '/tmp/doesnotexist_visor.yml' }

  before :each do
    # These examples need the real filesystem to stat hosts_path.
    FakeFS.deactivate!
  end

  it 'checks ec2_yaml when blimpy' do
    options[:ec2_yaml] = hosts_path
    options[:dot_fog] = invalid_file
    parser.instance_variable_set(:@options, options)
    expect { parser.check_hypervisor_config('blimpy') }.to_not raise_error
  end

  it 'throws an error if ec2_yaml for blimpy is invalid' do
    options[:ec2_yaml] = invalid_file
    options[:dot_fog] = hosts_path
    parser.instance_variable_set(:@options, options)
    expect { parser.check_hypervisor_config('blimpy') }.to raise_error(ArgumentError, /required by blimpy/)
  end

  # The fog-backed hypervisors all share the dot_fog requirement.
  %w(aix solaris vcloud).each do |visor|
    it "checks dot_fog when #{visor}" do
      options[:ec2_yaml] = invalid_file
      options[:dot_fog] = hosts_path
      parser.instance_variable_set(:@options, options)
      expect { parser.check_hypervisor_config(visor) }.to_not raise_error
    end

    it "throws an error if dot_fog for #{visor} is invalid" do
      options[:ec2_yaml] = hosts_path
      options[:dot_fog] = invalid_file
      parser.instance_variable_set(:@options, options)
      expect { parser.check_hypervisor_config(visor) }.to raise_error(ArgumentError, /required by #{visor}/)
    end
  end

  it 'does not throw error on unknown visor' do
    expect { parser.check_hypervisor_config('unknown_visor') }.to_not raise_error
  end
end
end
end
end
| 1 | 13,927 | These changes look like a hosts file _must_ be provided? | voxpupuli-beaker | rb |
@@ -92,8 +92,9 @@ int FixPythonInvoke::setmask()
void FixPythonInvoke::end_of_step()
{
PyUtils::GIL lock;
+ char fmt[] = "O";
- PyObject * result = PyObject_CallFunction((PyObject*)pFunc, "O", (PyObject*)lmpPtr);
+ PyObject * result = PyObject_CallFunction((PyObject*)pFunc, fmt, (PyObject*)lmpPtr);
if (!result) {
PyUtils::Print_Errors(); | 1 | /* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://lammps.sandia.gov/, Sandia National Laboratories
Steve Plimpton, [email protected]
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
/* ----------------------------------------------------------------------
Contributing author: Richard Berger (Temple U)
------------------------------------------------------------------------- */
#include "fix_python_invoke.h"
#include "comm.h"
#include "error.h"
#include "lmppython.h"
#include "python_compat.h"
#include "python_utils.h"
#include "update.h"
#include <cstring>
#include <Python.h> // IWYU pragma: export
using namespace LAMMPS_NS;
using namespace FixConst;
/* ---------------------------------------------------------------------- */
// Constructor. Syntax: fix ID group python/invoke N <callback> <function>
//   arg[3] = N        : invoke the callback every N timesteps (> 0)
//   arg[4] = callback : "post_force" or "end_of_step"
//   arg[5] = function : name of a function defined in Python's __main__
// Looks up the function in the embedded interpreter and keeps a reference
// to it (pFunc), plus a Python capsule wrapping the LAMMPS pointer (lmpPtr).
FixPythonInvoke::FixPythonInvoke(LAMMPS *lmp, int narg, char **arg) :
  Fix(lmp, narg, arg)
{
  if (narg != 6) error->all(FLERR,"Illegal fix python/invoke command");

  nevery = utils::inumeric(FLERR,arg[3],false,lmp);
  if (nevery <= 0) error->all(FLERR,"Illegal fix python/invoke command");

  // ensure Python interpreter is initialized
  python->init();

  if (strcmp(arg[4],"post_force") == 0) {
    selected_callback = POST_FORCE;
  } else if (strcmp(arg[4],"end_of_step") == 0) {
    selected_callback = END_OF_STEP;
  } else {
    error->all(FLERR,"Unsupported callback name for fix python/invoke");
  }

  // get Python function
  PyUtils::GIL lock;
  PyObject *pyMain = PyImport_AddModule("__main__");

  if (!pyMain) {
    // print the Python traceback before aborting
    PyUtils::Print_Errors();
    error->all(FLERR,"Could not initialize embedded Python");
  }

  char *fname = arg[5];
  pFunc = PyObject_GetAttrString(pyMain, fname);

  if (!pFunc) {
    PyUtils::Print_Errors();
    error->all(FLERR,"Could not find Python function");
  }

  lmpPtr = PY_VOID_POINTER(lmp);
}
/* ---------------------------------------------------------------------- */
// Destructor: drop the reference to the wrapped LAMMPS pointer.
// The GIL must be held while manipulating Python refcounts.
FixPythonInvoke::~FixPythonInvoke()
{
  PyUtils::GIL lock;
  Py_CLEAR(lmpPtr);
}
/* ---------------------------------------------------------------------- */
// Register only the callback chosen in the constructor
// (POST_FORCE or END_OF_STEP).
int FixPythonInvoke::setmask()
{
  return selected_callback;
}
/* ---------------------------------------------------------------------- */
// end_of_step callback: invoke the user function as f(lmp), where lmp is
// the capsule created in the constructor. Aborts LAMMPS (after printing
// the Python traceback) if the call raised an exception.
void FixPythonInvoke::end_of_step()
{
  // hold the GIL for the duration of the Python call
  PyUtils::GIL lock;
  PyObject * result = PyObject_CallFunction((PyObject*)pFunc, "O", (PyObject*)lmpPtr);

  if (!result) {
    PyUtils::Print_Errors();
    error->all(FLERR,"Fix python/invoke end_of_step() method failed");
  }

  Py_CLEAR(result);
}
/* ---------------------------------------------------------------------- */
// post_force callback: invoke the user function as f(lmp, vflag),
// but only on timesteps that are a multiple of nevery.
void FixPythonInvoke::post_force(int vflag)
{
  if (update->ntimestep % nevery != 0) return;

  // hold the GIL for the duration of the Python call
  PyUtils::GIL lock;
  PyObject * result = PyObject_CallFunction((PyObject*)pFunc, "Oi", (PyObject*)lmpPtr, vflag);

  if (!result) {
    PyUtils::Print_Errors();
    error->all(FLERR,"Fix python/invoke post_force() method failed");
  }

  Py_CLEAR(result);
}
| 1 | 30,574 | @akohlmey Just make it a cast like the others. I dislike it either way, but that's the price we pay for Python 2 support. | lammps-lammps | cpp |
@@ -68,6 +68,7 @@ program
.option('--disable_brave_extension', 'disable loading the Brave extension')
.option('--disable_pdfjs_extension', 'disable loading the PDFJS extension')
.option('--enable_brave_update', 'enable brave update')
+ .option('--channel <target_chanel>', 'target channel to start', /^(beta|dev|nightly|release)$/i, 'release')
.arguments('[build_config]')
.action(start)
| 1 | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
const program = require('commander');
const path = require('path')
const fs = require('fs-extra')
const config = require('../lib/config')
const util = require('../lib/util')
const build = require('../lib/build')
const versions = require('../lib/versions')
const start = require('../lib/start')
const updatePatches = require('../lib/updatePatches')
const pullL10n = require('../lib/pullL10n')
const pushL10n = require('../lib/pushL10n')
const chromiumRebaseL10n = require('../lib/chromiumRebaseL10n')
const createDist = require('../lib/createDist')
const upload = require('../lib/upload')
const test = require('../lib/test')
// CLI wiring for the brave-core build scripts. Each sub-command delegates to
// its implementation module from ../lib. The --channel option is validated
// against beta|dev|nightly|release and defaults to 'release'.
// Fixes: 'target_chanel' typo in the --channel placeholders, and the
// unbalanced parenthesis in the '-C' option descriptions.

program
  .version(process.env.npm_package_version)

program
  .command('versions')
  .action(versions)

program
  .command('build')
  .option('-C <build_dir>', 'build config (out/Debug, out/Release)')
  .option('--target_arch <target_arch>', 'target architecture', 'x64')
  .option('--mac_signing_identifier <id>', 'The identifier to use for signing')
  .option('--mac_signing_keychain <keychain>', 'The identifier to use for signing', 'login')
  .option('--debug_build <debug_build>', 'keep debugging symbols')
  .option('--official_build <official_build>', 'force official build settings')
  .option('--brave_google_api_key <brave_google_api_key>')
  .option('--brave_google_api_endpoint <brave_google_api_endpoint>')
  .option('--no_branding_update', 'don\'t copy BRANDING to the chrome theme dir')
  .option('--channel <target_channel>', 'target channel to build', /^(beta|dev|nightly|release)$/i, 'release')
  .arguments('[build_config]')
  .action(build)

program
  .command('create_dist')
  .option('-C <build_dir>', 'build config (out/Debug, out/Release)')
  .option('--target_arch <target_arch>', 'target architecture', 'x64')
  .option('--mac_signing_identifier <id>', 'The identifier to use for signing')
  .option('--mac_signing_keychain <keychain>', 'The identifier to use for signing', 'login')
  .option('--debug_build <debug_build>', 'keep debugging symbols')
  .option('--official_build <official_build>', 'force official build settings')
  .option('--brave_google_api_key <brave_google_api_key>')
  .option('--brave_google_api_endpoint <brave_google_api_endpoint>')
  .option('--no_branding_update', 'don\'t copy BRANDING to the chrome theme dir')
  .option('--channel <target_channel>', 'target channel to build', /^(beta|dev|nightly|release)$/i, 'release')
  .arguments('[build_config]')
  .action(createDist)

program
  .command('upload')
  .option('--target_arch <target_arch>', 'target architecture', 'x64')
  .action(upload)

program
  .command('start')
  .option('--v [log_level]', 'set log level to [log_level]', parseInt, '0')
  .option('--user_data_dir_name [base_name]', 'set user data directory base name to [base_name]', 'brave-development')
  .option('--no_sandbox', 'disable the sandbox')
  .option('--disable_brave_extension', 'disable loading the Brave extension')
  .option('--disable_pdfjs_extension', 'disable loading the PDFJS extension')
  .option('--enable_brave_update', 'enable brave update')
  .arguments('[build_config]')
  .action(start)

program
  .command('pull_l10n')
  .action(pullL10n)

program
  .command('push_l10n')
  .action(pushL10n)

program
  .command('chromium_rebase_l10n')
  .action(chromiumRebaseL10n)

program
  .command('update_patches')
  .action(updatePatches)

program
  .command('cibuild')
  .option('--target_arch <target_arch>', 'target architecture', 'x64')
  .action((options) => {
    // CI builds are always official release builds
    options.official_build = true
    build('Release', options)
  })

program
  .command('test <suite>')
  .option('--v [log_level]', 'set log level to [log_level]', parseInt, '0')
  .option('--filter <filter>', 'set test filter')
  .option('--disable_brave_extension', 'disable loading the Brave extension')
  .arguments('[build_config]')
  .action(test)

program
  .parse(process.argv)
| 1 | 5,387 | I wonder if the default here should be nightly? I won't block on that though just a question for a follow up. | brave-brave-browser | js |
@@ -181,7 +181,7 @@ namespace TestRunner
Application.SetUnhandledExceptionMode(UnhandledExceptionMode.CatchException);
Application.ThreadException += ThreadExceptionEventHandler;
- Console.OutputEncoding = Encoding.UTF8; // So we can send Japanese to SkylineTester, which monitors our stdout
+ //Console.OutputEncoding = Encoding.UTF8; // So we can send Japanese to SkylineTester, which monitors our stdout
// Parse command line args and initialize default values.
const string commandLineOptions = | 1 | /*
* Original author: Don Marsh <donmarsh .at. u.washington.edu>,
* MacCoss Lab, Department of Genome Sciences, UW
*
* Copyright 2012 University of Washington - Seattle, WA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Globalization;
using System.IO;
using System.Linq;
using System.Reflection;
using System.Runtime.InteropServices;
using System.Text;
using System.Text.RegularExpressions;
using System.Threading;
using System.Windows.Forms;
//WARNING: Including TestUtil in this project causes a strange build problem, where the first
// build from Visual Studio after a full bjam build removes all of the Skyline project
// root files from the Skyline bin directory, leaving it un-runnable until a full
// rebuild is performed. Do not commit a reference to TestUtil to this project without
// testing this case and getting someone else to validate that you have fixed this
// problem.
//using pwiz.SkylineTestUtil;
using TestRunnerLib;
namespace TestRunner
{
internal static class Program
{
// Assemblies scanned for tests, in search order.
private static readonly string[] TEST_DLLS = { "Test.dll", "TestA.dll", "TestConnected.dll", "TestFunctional.dll", "TestTutorial.dll", "CommonTest.dll", "TestPerf.dll" };

private const int LeakTrailingDeltas = 7;   // Number of trailing deltas to average and check against thresholds below
// CONSIDER: Ideally these thresholds would be zero, but memory and handle retention are not stable enough to support that
// The problem is that we don't reliably return to exactly the same state during EndTest and these numbers go both up and down
private const int KB = 1024;    // bytes per kilobyte; used to scale memory thresholds
private static LeakTracking LeakThresholds = new LeakTracking
{
    // Average delta per test between 8 runs (7 deltas)
    TotalMemory = 150 * KB, // Too much variance to track leaks in just 12 runs
    HeapMemory = 20 * KB,
    ManagedMemory = 8 * KB,
    TotalHandles = 2,
    UserGdiHandles = 1
};
private const int CrtLeakThreshold = 1000;  // No longer used
private const int LeakCheckIterations = 24; // Maximum number of runs to try to achieve below thresholds for trailing deltas
private static bool IsFixedLeakIterations { get { return false; } } // CONSIDER: It would be nice to make this true to reduce test run count variance

// These tests get twice as many runs to meet the leak thresholds when using fixed iterations
private static string[] LeakExceptionTests =
{
    "TestAbsoluteQuantificationTutorial",
    "TestCEOptimizationTutorial",
    "TestMethodRefinementTutorial",
    "TestTargetedMSMSTutorial",
    "TestMs1Tutorial"
};
/// <summary>
/// Leak-check run budget for a test: the known-noisy tests listed in
/// LeakExceptionTests get double the budget, but only when the iteration
/// count is fixed; everything else uses LeakCheckIterations.
/// </summary>
private static int GetLeakCheckIterations(TestInfo test)
{
    if (IsFixedLeakIterations && LeakExceptionTests.Contains(test.TestMethod.Name))
        return LeakCheckIterations * 2;
    return LeakCheckIterations;
}
/// <summary>
/// Snapshot of the process resource counters (memory and handle counts)
/// sampled from a RunTests instance, plus helpers to average per-run deltas
/// and compare them against leak thresholds.
/// </summary>
private struct LeakTracking
{
    public LeakTracking(runTests runTests) : this()
    {
        TotalMemory = runTests.TotalMemoryBytes;
        HeapMemory = runTests.CommittedMemoryBytes;
        ManagedMemory = runTests.ManagedMemoryBytes;
        TotalHandles = runTests.LastTotalHandleCount;
        UserGdiHandles = runTests.LastUserHandleCount + runTests.LastGdiHandleCount;
    }

    public double TotalMemory { get; set; }
    public double HeapMemory { get; set; }
    public double ManagedMemory { get; set; }
    public double TotalHandles { get; set; }
    public double UserGdiHandles { get; set; }

    /// <summary>True when every tracked counter is strictly below its threshold.</summary>
    public bool BelowThresholds(LeakTracking leakThresholds)
    {
        return TotalMemory < leakThresholds.TotalMemory &&
               HeapMemory < leakThresholds.HeapMemory &&
               ManagedMemory < leakThresholds.ManagedMemory &&
               TotalHandles < leakThresholds.TotalHandles &&
               UserGdiHandles < leakThresholds.UserGdiHandles;
    }

    /// <summary>Mean of consecutive-sample deltas, computed per counter.</summary>
    public static LeakTracking MeanDeltas(List<LeakTracking> values)
    {
        return new LeakTracking
        {
            TotalMemory = MeanDelta(values, l => l.TotalMemory),
            HeapMemory = MeanDelta(values, l => l.HeapMemory),
            ManagedMemory = MeanDelta(values, l => l.ManagedMemory),
            TotalHandles = MeanDelta(values, l => l.TotalHandles),
            UserGdiHandles = MeanDelta(values, l => l.UserGdiHandles)
        };
    }

    // Average of values[i] - values[i-1] for one counter; requires at least
    // two samples (Average() throws on an empty sequence).
    private static double MeanDelta(List<LeakTracking> values, Func<LeakTracking, double> getValue)
    {
        var listDelta = new List<double>();
        for (int i = 1; i < values.Count; i++)
            listDelta.Add(getValue(values[i]) - getValue(values[i - 1]));
        return listDelta.Average();
    }

    /// <summary>
    /// Message describing the first counter at/above its threshold,
    /// or null when no counter leaked. Checked most- to least-specific.
    /// </summary>
    public string GetLeakMessage(LeakTracking leakThresholds, string testName)
    {
        if (ManagedMemory >= leakThresholds.ManagedMemory)
            return string.Format("!!! {0} LEAKED {1:0.#} Managed bytes\r\n", testName, ManagedMemory);
        if (HeapMemory >= leakThresholds.HeapMemory)
            return string.Format("!!! {0} LEAKED {1:0.#} Heap bytes\r\n", testName, HeapMemory);
        if (TotalMemory >= leakThresholds.TotalMemory)
            return string.Format("!!! {0} LEAKED {1:0.#} bytes\r\n", testName, TotalMemory);
        if (UserGdiHandles >= leakThresholds.UserGdiHandles)
            return string.Format("!!! {0} HANDLE-LEAKED {1:0.#} User+GDI\r\n", testName, UserGdiHandles);
        if (TotalHandles >= leakThresholds.TotalHandles)
            return string.Format("!!! {0} HANDLE-LEAKED {1:0.#} Total\r\n", testName, TotalHandles);
        return null;
    }

    public string GetLogMessage(string testName, int passedCount)
    {
        // Report the final mean average deltas over the passing or final 8 runs (7 deltas)
        return string.Format("# {0} deltas ({1}): {2}\r\n", testName, passedCount, this);
    }

    public override string ToString()
    {
        return string.Format("managed = {0:0.#} KB, heap = {1:0.#} KB, memory = {2:0.#} KB, user-gdi = {3:0.#}, total = {4:0.#}",
            ManagedMemory / KB, HeapMemory / KB, TotalMemory / KB, UserGdiHandles, TotalHandles);
    }

    /// <summary>Per-counter maximum of this snapshot and another.</summary>
    public LeakTracking Max(LeakTracking lastDeltas)
    {
        return new LeakTracking
        {
            TotalMemory = Math.Max(TotalMemory, lastDeltas.TotalMemory),
            HeapMemory = Math.Max(HeapMemory, lastDeltas.HeapMemory),
            ManagedMemory = Math.Max(ManagedMemory, lastDeltas.ManagedMemory),
            TotalHandles = Math.Max(TotalHandles, lastDeltas.TotalHandles),
            UserGdiHandles = Math.Max(UserGdiHandles, lastDeltas.UserGdiHandles)
        };
    }

    /// <summary>Per-counter minimum of this snapshot and another.</summary>
    public LeakTracking Min(LeakTracking lastDeltas)
    {
        return new LeakTracking
        {
            TotalMemory = Math.Min(TotalMemory, lastDeltas.TotalMemory),
            HeapMemory = Math.Min(HeapMemory, lastDeltas.HeapMemory),
            ManagedMemory = Math.Min(ManagedMemory, lastDeltas.ManagedMemory),
            TotalHandles = Math.Min(TotalHandles, lastDeltas.TotalHandles),
            UserGdiHandles = Math.Min(UserGdiHandles, lastDeltas.UserGdiHandles)
        };
    }
}
/// <summary>
/// TestRunner entry point: parses command-line options, loads and optionally
/// filters the test list, runs the requested passes (inside a system-sleep
/// guard, with optional memory profiling), and writes/prints the log report.
/// Returns 0 when all tests passed, 1 otherwise.
/// Fix: the filter-error throws used ArgumentException(message, paramName),
/// which never formats "{0}"; now formatted explicitly with string.Format.
/// </summary>
[STAThread]
static int Main(string[] args)
{
    Application.SetUnhandledExceptionMode(UnhandledExceptionMode.CatchException);
    Application.ThreadException += ThreadExceptionEventHandler;
    Console.OutputEncoding = Encoding.UTF8; // So we can send Japanese to SkylineTester, which monitors our stdout

    // Parse command line args and initialize default values.
    const string commandLineOptions =
        "?;/?;-?;help;skylinetester;debug;results;" +
        "test;skip;filter;form;" +
        "loop=0;repeat=1;pause=0;random=off;offscreen=on;multi=1;wait=off;internet=off;" +
        "maxsecondspertest=-1;" +
        "demo=off;showformnames=off;showpages=off;status=off;buildcheck=0;screenshotlist;" +
        "quality=off;pass0=off;pass1=off;" +
        "perftests=off;" +
        "runsmallmoleculeversions=off;" +
        "testsmallmolecules=off;" +
        "clipboardcheck=off;profile=off;vendors=on;language=fr-FR,en-US;" +
        "log=TestRunner.log;report=TestRunner.log";
    var commandLineArgs = new CommandLineArgs(args, commandLineOptions);

    // Help and report requests short-circuit before any test loading.
    switch (commandLineArgs.SearchArgs("?;/?;-?;help;report"))
    {
        case "?":
        case "/?":
        case "-?":
        case "help":
            Help();
            return 0;

        case "report":
            Report(commandLineArgs.ArgAsString("report"));
            return 0;
    }

    Console.WriteLine();
    if (!commandLineArgs.ArgAsBool("status") && !commandLineArgs.ArgAsBool("buildcheck"))
    {
        Console.WriteLine("TestRunner " + string.Join(" ", args) + "\n");
        Console.WriteLine("Process: {0}\n", Process.GetCurrentProcess().Id);
    }

    if (commandLineArgs.HasArg("debug"))
    {
        Console.WriteLine("*** Launching debugger ***\n\n");

        // NOTE: For efficient debugging of Skyline, it is most useful to choose a debugger
        // that already has Skyline.sln loaded.  Otherwise, you might not be able to set
        // breakpoints.
        Debugger.Break();
    }

    // Create log file. ReadWrite share lets SkylineTester tail it while we write.
    var logStream = new FileStream(
        commandLineArgs.ArgAsString("log"),
        FileMode.Create,
        FileAccess.Write,
        FileShare.ReadWrite);
    var log = new StreamWriter(logStream);

    bool allTestsPassed = true;

    try
    {
        // Load list of tests.
        var unfilteredTestList = LoadTestList(commandLineArgs);

        // Filter test list. "filter" is comma-separated 1-based ranges, e.g. "1-5,8".
        var testList = unfilteredTestList;
        if (commandLineArgs.HasArg("filter"))
        {
            testList = new List<TestInfo>();
            var filterRanges = commandLineArgs.ArgAsString("filter").Split(',');
            foreach (var range in filterRanges)
            {
                var bounds = range.Split('-');
                if (bounds.Length < 1 || bounds.Length > 2)
                {
                    // string.Format: the (message, paramName) ctor never formats {0}
                    throw new ArgumentException(string.Format("Unrecognized filter parameter: {0}", range));
                }
                int low;
                if (!int.TryParse(bounds[0], out low))
                {
                    throw new ArgumentException(string.Format("Unrecognized filter parameter: {0}", range));
                }
                int high = low;
                if (bounds.Length == 2 && !int.TryParse(bounds[1], out high))
                {
                    throw new ArgumentException(string.Format("Unrecognized filter parameter: {0}", range));
                }
                // Convert the 1-based inclusive range to 0-based indexes.
                for (var i = low-1; i <= high-1; i++)
                {
                    testList.Add(unfilteredTestList[i]);
                }
            }
        }

        if (testList.Count == 0)
        {
            Console.WriteLine("No tests found");
            allTestsPassed = false;
        }
        else
        {
            var passes = commandLineArgs.ArgAsLong("loop");
            var repeat = commandLineArgs.ArgAsLong("repeat");
            if (commandLineArgs.ArgAsBool("buildcheck"))
            {
                // Build checks run every test exactly once.
                passes = 1;
                repeat = 1;
            }

            // Prevent system sleep.
            using (new SystemSleep())
            {
                // Pause before first test for profiling.
                bool profiling = commandLineArgs.ArgAsBool("profile");
                if (profiling)
                {
                    Console.WriteLine("\nRunning each test once to warm up memory...\n");
                    allTestsPassed = RunTestPasses(testList, unfilteredTestList, commandLineArgs, log, 1, 1,
                        true);
                    Console.WriteLine("\nTaking memory snapshot...\n");
                    MemoryProfiler.Snapshot("start");
                    if (passes == 0)
                        passes = 1;
                }

                allTestsPassed =
                    RunTestPasses(testList, unfilteredTestList, commandLineArgs, log, passes, repeat, profiling) &&
                    allTestsPassed;

                // Pause for profiling
                if (profiling)
                {
                    Console.WriteLine("\nTaking second memory snapshot...\n");
                    MemoryProfiler.Snapshot("end");
                }
            }
        }
    }
    catch (Exception e)
    {
        // Report the full exception chain to stdout before failing the run.
        Console.WriteLine("\nCaught exception in TestRunnner.Program.Main:\n" + e.Message);
        if (string.IsNullOrEmpty(e.StackTrace))
            Console.WriteLine("No stacktrace");
        else
            Console.WriteLine(e.StackTrace);
        if (e.InnerException != null)
        {
            Console.WriteLine("Inner exception:");
            Console.WriteLine(e.InnerException.Message);
            if (string.IsNullOrEmpty(e.InnerException.StackTrace))
                Console.WriteLine("No stacktrace");
            else
                Console.WriteLine(e.InnerException.StackTrace);
        }
        else
        {
            Console.WriteLine("No inner exception.");
        }
        Console.Out.Flush(); // Get this info to TeamCity or SkylineTester ASAP
        allTestsPassed = false;
    }

    // Display report.
    log.Close();
    Console.WriteLine("\n");
    if (!commandLineArgs.ArgAsBool("status"))
        Report(commandLineArgs.ArgAsString("log"));

    // Ungraceful exit to avoid unwinding errors
    //Process.GetCurrentProcess().Kill();

    if (commandLineArgs.ArgAsBool("wait"))
        Console.ReadKey();

    return allTestsPassed ? 0 : 1;
}
/// <summary>
/// Walks up from the executing assembly's folder until a directory named
/// "Skyline" is found. Returns null when no such ancestor exists or the
/// assembly location has no directory component.
/// </summary>
private static DirectoryInfo GetSkylineDirectory()
{
    var assemblyFolder = Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location);
    if (assemblyFolder == null)
        return null;

    for (var candidate = new DirectoryInfo(assemblyFolder); candidate != null; candidate = candidate.Parent)
    {
        if (candidate.Name == "Skyline")
            return candidate;
    }
    return null;
}
// Run all test passes.
private static bool RunTestPasses(
List<TestInfo> testList,
List<TestInfo> unfilteredTestList,
CommandLineArgs commandLineArgs,
StreamWriter log,
long loopCount,
long repeat,
bool profiling = false)
{
bool buildMode = commandLineArgs.ArgAsBool("buildcheck");
bool randomOrder = commandLineArgs.ArgAsBool("random");
bool demoMode = commandLineArgs.ArgAsBool("demo");
bool offscreen = commandLineArgs.ArgAsBool("offscreen");
bool internet = commandLineArgs.ArgAsBool("internet");
bool perftests = commandLineArgs.ArgAsBool("perftests");
bool addsmallmoleculenodes = commandLineArgs.ArgAsBool("testsmallmolecules"); // Add the magic small molecule test node to every document?
bool runsmallmoleculeversions = commandLineArgs.ArgAsBool("runsmallmoleculeversions"); // Run the various tests that are versions of other tests with the document completely converted to small molecules?
bool useVendorReaders = commandLineArgs.ArgAsBool("vendors");
bool showStatus = commandLineArgs.ArgAsBool("status");
bool showFormNames = commandLineArgs.ArgAsBool("showformnames");
bool showMatchingPages = commandLineArgs.ArgAsBool("showpages");
bool qualityMode = commandLineArgs.ArgAsBool("quality");
bool pass0 = commandLineArgs.ArgAsBool("pass0");
bool pass1 = commandLineArgs.ArgAsBool("pass1");
int timeoutMultiplier = (int) commandLineArgs.ArgAsLong("multi");
int pauseSeconds = (int) commandLineArgs.ArgAsLong("pause");
var formList = commandLineArgs.ArgAsString("form");
var pauseDialogs = (string.IsNullOrEmpty(formList)) ? null : formList.Split(',');
var results = commandLineArgs.ArgAsString("results");
var maxSecondsPerTest = commandLineArgs.ArgAsDouble("maxsecondspertest");
bool asNightly = offscreen && qualityMode; // While it is possible to run quality off screen from the Quality tab, this is what we use to distinguish for treatment of perf tests
// If we haven't been told to run perf tests, remove any from the list
// which may have shown up by default
if (!perftests)
{
for (var t = testList.Count; t-- > 0; )
{
if (testList[t].IsPerfTest)
{
testList.RemoveAt(t);
}
}
for (var ut = unfilteredTestList.Count; ut-- > 0; )
{
if (unfilteredTestList[ut].IsPerfTest)
{
unfilteredTestList.RemoveAt(ut);
}
}
}
// Even if we have been told to run perftests, if none are in the list
// then make sure we don't chat about perf tests in the log
perftests &= testList.Any(t => t.IsPerfTest);
if (buildMode)
{
randomOrder = false;
demoMode = false;
offscreen = true;
useVendorReaders = true;
showStatus = false;
qualityMode = false;
pauseSeconds = 0;
}
var runTests = new RunTests(
demoMode, buildMode, offscreen, internet, showStatus, perftests, addsmallmoleculenodes,
runsmallmoleculeversions,
pauseDialogs, pauseSeconds, useVendorReaders, timeoutMultiplier,
results, log);
if (commandLineArgs.ArgAsBool("clipboardcheck"))
{
runTests.TestContext.Properties["ClipboardCheck"] = "TestRunner clipboard check";
Console.WriteLine("Checking clipboard use for {0} tests...\n", testList.Count);
loopCount = 1;
randomOrder = false;
}
else
{
if (!randomOrder && perftests)
runTests.Log("Perf tests will run last, for maximum overall test coverage.\r\n");
runTests.Log("Running {0}{1} tests{2}{3}...\r\n",
testList.Count,
testList.Count < unfilteredTestList.Count ? "/" + unfilteredTestList.Count : "",
(loopCount <= 0) ? " forever" : (loopCount == 1) ? "" : " in " + loopCount + " loops",
(repeat <= 1) ? "" : ", repeated " + repeat + " times each per language");
}
// Get list of languages
var languages = buildMode
? new[] {"en"}
: commandLineArgs.ArgAsString("language").Split(',');
if (showFormNames)
runTests.Skyline.Set("ShowFormNames", true);
if (showMatchingPages)
runTests.Skyline.Set("ShowMatchingPages", true);
var executingDirectory = Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location);
var qualityLanguages = new FindLanguages(executingDirectory, "en", "fr").Enumerate().ToArray();
var removeList = new List<TestInfo>();
// Pass 0: Test an interesting collection of edge cases:
// French number format,
// No vendor readers,
// No internet access,
// Old reports
if (pass0)
{
runTests.Log("\r\n");
runTests.Log("# Pass 0: Run with French number format, no vendor readers, no internet access, old reports.\r\n");
runTests.Language = new CultureInfo("fr");
runTests.Skyline.Set("NoVendorReaders", true);
runTests.AccessInternet = false;
runTests.LiveReports = false;
runTests.RunPerfTests = false;
runTests.AddSmallMoleculeNodes = false;
runTests.CheckCrtLeaks = CrtLeakThreshold;
bool warnedPass0PerfTest = false;
for (int testNumber = 0; testNumber < testList.Count; testNumber++)
{
var test = testList[testNumber];
if (test.IsPerfTest)
{
// These are largely about vendor and/or internet performance, so not worth doing in pass 0
if (!warnedPass0PerfTest)
{
warnedPass0PerfTest = true;
runTests.Log("# Skipping perf tests for pass 0.\r\n");
}
continue;
}
if (!runTests.Run(test, 0, testNumber))
removeList.Add(test);
}
runTests.Skyline.Set("NoVendorReaders", false);
runTests.AccessInternet = internet;
runTests.LiveReports = true;
runTests.RunPerfTests = perftests;
runTests.AddSmallMoleculeNodes = addsmallmoleculenodes;
runTests.CheckCrtLeaks = 0;
foreach (var removeTest in removeList)
testList.Remove(removeTest);
removeList.Clear();
}
// Pass 1: Look for cumulative leaks when test is run multiple times.
if (pass1)
{
runTests.Log("\r\n");
runTests.Log("# Pass 1: Run tests multiple times to detect memory leaks.\r\n");
bool warnedPass1PerfTest = false;
var maxDeltas = new LeakTracking();
int maxIterationCount = 0;
for (int testNumber = 0; testNumber < testList.Count; testNumber++)
{
var test = testList[testNumber];
bool failed = false;
if (test.IsPerfTest)
{
// These are generally too lengthy to run multiple times, so not a good fit for pass 1
if (!warnedPass1PerfTest)
{
warnedPass1PerfTest = true;
runTests.Log("# Skipping perf tests for pass 1 leak checks.\r\n");
}
continue;
}
if (failed)
continue;
// Run test repeatedly until we can confidently assess the leak status.
var listValues = new List<LeakTracking>();
LeakTracking? minDeltas = null;
int? passedIndex = null;
for (int i = 0; i < GetLeakCheckIterations(test); i++)
{
// Run the test in the next language.
runTests.Language =
new CultureInfo(qualityLanguages[i%qualityLanguages.Length]);
if (!runTests.Run(test, 1, testNumber))
{
failed = true;
removeList.Add(test);
break;
}
// Run linear regression on memory size samples.
listValues.Add(new LeakTracking(runTests));
if (listValues.Count <= LeakTrailingDeltas)
continue;
// Stop accumulating points if all leak minimal values are below the threshold values.
var lastDeltas = LeakTracking.MeanDeltas(listValues);
minDeltas = minDeltas.HasValue ? minDeltas.Value.Min(lastDeltas) : lastDeltas;
if (minDeltas.Value.BelowThresholds(LeakThresholds))
{
passedIndex = passedIndex ?? i;
if (!IsFixedLeakIterations)
break;
}
// Remove the oldest point unless this is the last iteration
// So that the report below will be based on the set that just
// failed the leak check
if (!passedIndex.HasValue || i < LeakCheckIterations - 1)
{
listValues.RemoveAt(0);
}
}
if (failed)
continue;
string leakMessage = minDeltas.Value.GetLeakMessage(LeakThresholds, test.TestMethod.Name);
int iterationCount = passedIndex + 1 ?? LeakCheckIterations;
if (leakMessage != null)
{
runTests.Log(leakMessage);
removeList.Add(test);
}
runTests.Log(minDeltas.Value.GetLogMessage(test.TestMethod.Name, iterationCount));
maxDeltas = maxDeltas.Max(minDeltas.Value);
maxIterationCount = Math.Max(maxIterationCount, iterationCount);
}
runTests.Log(maxDeltas.GetLogMessage("MaximumLeaks", maxIterationCount));
foreach (var removeTest in removeList)
testList.Remove(removeTest);
removeList.Clear();
}
if (qualityMode)
languages = qualityLanguages;
// Run all test passes.
int pass = 1;
int passEnd = pass + (int) loopCount;
if (pass0 || pass1)
{
pass++;
passEnd++;
}
if (loopCount <= 0)
{
passEnd = int.MaxValue;
}
if (pass == 2 && pass < passEnd && testList.Count > 0)
{
runTests.Log("\r\n");
runTests.Log("# Pass 2+: Run tests in each selected language.\r\n");
}
int perfPass = pass; // For nightly tests, we'll run perf tests just once per language, and only in one language (dynamically chosen for coverage) if english and french (along with any others) are both enabled
bool needsPerfTestPass2Warning = asNightly && testList.Any(t => t.IsPerfTest); // No perf tests, no warning
var perfTestsOneLanguageOnly = asNightly && perftests && languages.Any(l => l.StartsWith("en")) && languages.Any(l => l.StartsWith("fr"));
bool flip = true;
for (; pass < passEnd; pass++)
{
if (testList.Count == 0)
break;
// Run each test in this test pass.
var testPass = randomOrder ? testList.RandomOrder().ToList() : testList;
for (int testNumber = 0; testNumber < testPass.Count; testNumber++)
{
var test = testPass[testNumber];
// Perf Tests are generally too lengthy to run multiple times (but non-english format check is useful, so rotate through on a per-day basis)
var perfTestLanguage = languages[DateTime.Now.DayOfYear % languages.Length];
var languagesThisTest = (test.IsPerfTest && perfTestsOneLanguageOnly) ? new[] { perfTestLanguage } : languages;
if (perfTestsOneLanguageOnly && needsPerfTestPass2Warning)
{
// NB the phrase "# Perf tests" in a log is a key for SkylineNightly to post to a different URL - so don't mess with this.
runTests.Log("# Perf tests will be run only once, and only in one language, dynamically chosen (by DayOfYear%NumberOfLanguages) for coverage. To run perf tests in specific languages, enable all but English.\r\n");
needsPerfTestPass2Warning = false;
}
// Run once (or repeat times) for each language.
for (int i = 0; i < languagesThisTest.Length; i++)
{
runTests.Language = new CultureInfo(languagesThisTest[i]);
var stopWatch = new Stopwatch();
stopWatch.Start(); // Limit the repeats in case of very long tests
for (int repeatCounter = 1; repeatCounter <= repeat; repeatCounter++)
{
if (asNightly && test.IsPerfTest && ((pass > perfPass) || (repeatCounter > 1)))
{
// Perf Tests are generally too lengthy to run multiple times (but per-language check is useful)
if (needsPerfTestPass2Warning)
{
// NB the phrase "# Perf tests" in a log is a key for SkylineNightly to post to a different URL - so don't mess with this.
runTests.Log("# Perf tests will be run only once per language.\r\n");
needsPerfTestPass2Warning = false;
}
break;
}
if (!runTests.Run(test, pass, testNumber))
{
removeList.Add(test);
i = languages.Length - 1; // Don't run other languages.
break;
}
if ( maxSecondsPerTest > 0)
{
var maxSecondsPerTestPerLanguage = maxSecondsPerTest / languagesThisTest.Length; // We'd like no more than 5 minutes per test across all languages when doing stess tests
if (stopWatch.Elapsed.TotalSeconds > maxSecondsPerTestPerLanguage && repeatCounter <= repeat - 1)
{
runTests.Log("# Breaking repeat test at count {0} of requested {1} (at {2} minutes), to allow other tests and languages to run.\r\n", repeatCounter, repeat, stopWatch.Elapsed.TotalMinutes);
break;
}
}
}
if (profiling)
break;
}
}
foreach (var removeTest in removeList)
testList.Remove(removeTest);
removeList.Clear();
runTests.AddSmallMoleculeNodes = addsmallmoleculenodes && (flip = !flip); // Do this in every other pass, so we get it both ways
}
return runTests.FailureCount == 0;
}
// Load list of tests to be run into TestList.
    // Load the list of tests to be run, based on command-line arguments.
    // Tests come either from an explicit "test=..." name list, or — when "form=..."
    // is given — from a lookup of which tests cover the named forms. Names in
    // "skip=..." are excluded. Explicitly named tests keep their command-line order;
    // otherwise tests run alphabetically with perf tests last.
    private static List<TestInfo> LoadTestList(CommandLineArgs commandLineArgs)
    {
        List<string> testNames;
        var testList = new List<TestInfo>();

        // Clear forms/tests cache if desired.
        var formArg = commandLineArgs.ArgAsString("form");

        // Load lists of tests to run.
        if (string.IsNullOrEmpty(formArg))
            testNames = LoadList(commandLineArgs.ArgAsString("test"));

        // Find which tests best cover the desired forms.
        else
        {
            var formLookup = new FormLookup();
            List<string> uncoveredForms;
            testNames = formLookup.FindTests(LoadList(formArg), out uncoveredForms);
            if (uncoveredForms.Count > 0)
            {
                MessageBox.Show("No tests found to show these Forms: " + string.Join(", ", uncoveredForms), "Warning");
                return testList;
            }
        }

        // Maintain order in list of explicitly specified tests.
        // Maps test name -> its position on the command line; duplicates are an error.
        var testDict = new Dictionary<string, int>();
        for (int i = 0; i < testNames.Count; i++)
        {
            if (testDict.ContainsKey(testNames[i]))
            {
                MessageBox.Show("Duplicate test name: " + testNames[i]);
                throw new ArgumentException("Duplicate test name: " + testNames[i]);
            }
            testDict.Add(testNames[i], i);
        }

        var testArray = new TestInfo[testNames.Count];

        var skipList = LoadList(commandLineArgs.ArgAsString("skip"));

        // Find tests in the test dlls.
        foreach (var testDll in TEST_DLLS)
        {
            foreach (var testInfo in RunTests.GetTestInfos(testDll))
            {
                // A test can be addressed either as "Class.Method" or just "Method".
                var testName = testInfo.TestClassType.Name + "." + testInfo.TestMethod.Name;
                if (testNames.Count == 0 || testNames.Contains(testName) ||
                    testNames.Contains(testInfo.TestMethod.Name))
                {
                    if (!skipList.Contains(testName) && !skipList.Contains(testInfo.TestMethod.Name))
                    {
                        if (testNames.Count == 0)
                            testList.Add(testInfo);
                        else
                        {
                            // Place the test at its command-line position.
                            string lookup = testNames.Contains(testName) ? testName : testInfo.TestMethod.Name;
                            testArray[testDict[lookup]] = testInfo;
                        }
                    }
                }
            }
        }
        // Entries that were skipped (or not found) remain null in testArray; drop them.
        if (testNames.Count > 0)
            testList.AddRange(testArray.Where(testInfo => testInfo != null));

        // Sort tests alphabetically, but run perf tests last for best coverage in a fixed amount of time.
        return testList.OrderBy(e => e.IsPerfTest).ThenBy(e => e.TestMethod.Name).ToList();
    }
private static List<TestInfo> GetTestList(IEnumerable<string> dlls)
{
var testList = new List<TestInfo>();
// Find tests in the test dlls.
foreach (var testDll in dlls)
{
testList.AddRange(RunTests.GetTestInfos(testDll));
}
// Sort tests alphabetically.
testList.Sort((x, y) => String.CompareOrdinal(x.TestMethod.Name, y.TestMethod.Name));
return testList;
}
// Load a list of tests specified on the command line as a comma-separated list. Any name prefixed with '@'
// is a file containing test names separated by white space or new lines, with '#' indicating a comment.
private static List<string> LoadList(string testList)
{
var inputList = testList.Split(',');
var outputList = new List<string>();
// Check for empty list.
if (inputList.Length == 1 && inputList[0] == "")
{
return outputList;
}
foreach (var name in inputList)
{
if (name.StartsWith("@"))
{
var file = name.Substring(1);
var lines = File.ReadAllLines(file);
foreach (var line in lines)
{
// remove comments
var lineParts = line.Split('#');
if (lineParts.Length > 0 && lineParts[0] != "")
{
// split multiple test names in one line
outputList.AddRange(lineParts[0].Trim().Split(' ', '\t'));
}
}
}
else if (name.EndsWith(".dll", StringComparison.CurrentCultureIgnoreCase))
{
foreach (var testInfo in RunTests.GetTestInfos(name))
outputList.Add(testInfo.TestClassType.Name + "." + testInfo.TestMethod.Name);
}
else
{
outputList.Add(name);
}
}
return outputList;
}
    // Simple record of a single leaking test, used when summarizing a log file.
    private class LeakingTest
    {
        public string TestName;   // test name as it appears in the log line
        public double LeakSize;   // leaked amount per run (bytes or handles, per the log section)
    }
    // Generate a summary report of errors and memory leaks from a log file.
    // Log format (as parsed here): a line starting with "!!!" introduces either a
    // leak record ("!!! <test> LEAKED|HANDLE-LEAKED|CRT-LEAKED <size>") or the start
    // of a multi-line failure report that ends with a bare "!!!" line.
    private static void Report(string logFile)
    {
        var logLines = File.ReadAllLines(logFile);

        var errorList = new List<string>();
        var leakList = new List<LeakingTest>();
        var handleLeakList = new List<LeakingTest>();
        var crtLeakList = new List<LeakingTest>();

        // Non-null while accumulating the body of a multi-line failure report.
        string error = null;
        foreach (var line in logLines)
        {
            if (error != null)
            {
                // A bare "!!!" line terminates the current failure report.
                if (line == "!!!")
                {
                    errorList.Add(error);
                    error = null;
                }
                else
                {
                    error += "# " + line + "\n";
                }
                continue;
            }
            // Collapse runs of whitespace to single spaces before splitting into fields.
            var parts = Regex.Replace(line, @"\s+", " ").Trim().Split(' ');

            // Is it an error line?
            if (parts[0] == "!!!")
            {
                var test = parts[1];
                var failureType = parts[2];

                if (failureType == "LEAKED")
                {
                    var leakSize = double.Parse(parts[3]);
                    leakList.Add(new LeakingTest { TestName = test, LeakSize = leakSize });
                    continue;
                }
                else if (failureType == "HANDLE-LEAKED")
                {
                    var leakSize = double.Parse(parts[3]);
                    handleLeakList.Add(new LeakingTest { TestName = test, LeakSize = leakSize });
                    continue;
                }
                else if (failureType == "CRT-LEAKED")
                {
                    var leakSize = long.Parse(parts[3]);
                    crtLeakList.Add(new LeakingTest { TestName = test, LeakSize = leakSize });
                    continue;
                }

                // Any other failure type begins a multi-line error report.
                error = "# " + test + " FAILED:\n";
            }
        }

        // Print list of errors sorted in descending order of frequency.
        Console.WriteLine();
        if (errorList.Count == 0)
            Console.WriteLine("# No failures.\n");
        foreach (var failure in errorList)
            Console.WriteLine(failure);

        if (leakList.Count > 0)
        {
            Console.WriteLine();
            Console.WriteLine("# Leaking tests (bytes leaked per run):");
            ReportLeaks(leakList);
        }

        if (handleLeakList.Count > 0)
        {
            Console.WriteLine();
            Console.WriteLine("# Leaking handles tests (handles per run):");
            ReportLeaks(handleLeakList);
        }

        if (crtLeakList.Count > 0)
        {
            Console.WriteLine();
            Console.WriteLine("# Tests leaking unmanaged memory:");
            ReportLeaks(crtLeakList);
        }
    }
private static void ReportLeaks(IEnumerable<LeakingTest> leakList)
{
foreach (var leakTest in leakList.OrderByDescending(test => test.LeakSize))
{
Console.WriteLine("# {0,-36} {1,10:0.#}",
leakTest.TestName.Substring(0, Math.Min(36, leakTest.TestName.Length)),
leakTest.LeakSize);
}
}
    // Display help documentation.
    // NOTE(review): the text below is a verbatim string literal — its exact content
    // (including line breaks) is what gets printed to the console, so it must not
    // be reflowed or re-indented.
    private static void Help()
    {
        Console.WriteLine(@"
TestRunner with no parameters runs all Skyline unit tests (marked [TestMethod])
in random order until the process is killed. It produces a log file (TestRunner.log)
in the current directory. You can get a summary of errors and memory leaks by running
""TestRunner report"".
Here is a list of recognized arguments:
test=[test1,test2,...] Run one or more tests by name (separated by ',').
Test names can be just the method name, or the method
name prefixed by the class name and a period
(such as IrtTest.IrtFunctionalTest). Tests must belong
to a class marked [TestClass], although the method does
not need to be marked [TestMethod] to be included in a
test run. A name prefixed by '@' (such as ""@fail.txt"")
refers to a text file containing test names separated by
white space or new lines. These files can also include
single-line comments starting with a '#' character.
skip=[test1,test2,...] Skip the tests specified by name, using the same scheme
as the test option described above. You can specify
tests by name or by file (prefixed by the '@' character).
filter=[a-b,c-d,...] Once the list of tests has been generated using the test
and/or skip options, filter allows ranges of tests to be
run. This can be useful in narrowing down a problem that
occurred somewhere in a large test set. For example,
filter=1-10 will run the first 10 tests in the alphabetized
list. Multiple ranges are allowed, such as
filter=3-7,9,13-19.
loop=[n] Run the tests ""n"" times, where n is a non-negative
integer. A value of 0 will run the tests forever
(or until the process is killed). That is the default
setting if the loop argument is not specified.
repeat=[n] Repeat each test ""n"" times, where n is a positive integer.
This can help diagnose consistent memory leaks, in contrast
with a leak that occurs only the first time a test is run.
maxsecondspertest=[n] Used in conjunction with the repeat value, this limits the
amount of time a repeated test will take to no more than ""n""
seconds, where n is an integer greater than 0. If this time
is exceeded, the test will not be repeated further.
random=[on|off] Run the tests in random order (random=on, the default)
or alphabetic order (random=off). Each test is selected
exactly once per loop, regardless of the order.
offscreen=[on|off] Set offscreen=on (the default) to keep Skyline windows
from flashing on the desktop during a test run.
language=[language1,language2,...] Choose a random language from this list before executing
each test. Default value is ""en-US,fr-FR"". You can
specify just one language if you want all tests to run
in that language.
demo=[on|off] Set demo=on to pause slightly at PauseForScreenshot() calls
maximize the main window and show all-chromatograms graph
in lower-right corner
multi=[n] Multiply timeouts in unit tests by a factor of ""n"".
This is necessary when running multiple instances of
TestRunner simultaneously.
log=[file] Writes log information to the specified file. The
default log file is TestRunner.log in the current
directory.
report=[file] Displays a summary of the errors and memory leaks
recorded in the log file produced during a prior
run of TestRunner. If you don't specify a file,
it will use TestRunner.log in the current directory.
The report is formatted so it can be used as an input
file for the ""test"" or ""skip"" options in a subsequent
run.
profile=[on|off] Set profile=on to enable memory profiling mode.
TestRunner will pause for 10 seconds after the first
test is run to allow you to take a memory snapshot.
After the test run it will sleep instead of terminating
to allow you to take a final memory snapshot.
vendors=[on|off] If vendors=on, Skyline's tests will use vendor readers to
read data files. If vendors=off, tests will read data using
the mzML format. This is useful to isolate memory leaks or
other problems that might occur in the vendor readers.
clipboardcheck When this argument is specified, TestRunner runs
each test once, and makes sure that it did not use
the system clipboard. If a test uses the clipboard,
stress testing might be compromised on a computer
which is running other processes simultaneously.
");
    }
private static void ThreadExceptionEventHandler(Object sender, ThreadExceptionEventArgs e)
{
Console.WriteLine("Report from TestRunner.Program.ThreadExceptionEventHandler:");
Console.WriteLine(e.Exception.Message);
if (string.IsNullOrEmpty(e.Exception.StackTrace))
Console.WriteLine("No stacktrace");
else
Console.WriteLine(e.Exception.StackTrace);
if (e.Exception.InnerException != null)
{
Console.WriteLine("Inner exception:");
Console.WriteLine(e.Exception.InnerException.Message);
if (string.IsNullOrEmpty(e.Exception.InnerException.StackTrace))
Console.WriteLine("No stacktrace");
else
Console.WriteLine(e.Exception.InnerException.StackTrace);
}
else
{
Console.WriteLine("No inner exception.");
}
Console.Out.Flush(); // Get this info to TeamCity or SkylineTester ASAP
}
public static IEnumerable<TItem> RandomOrder<TItem>(this IList<TItem> list)
{
int count = list.Count;
var indexOrder = new int[count];
for (int i = 0; i < count; i++)
indexOrder[i] = i;
Random r = new Random();
for (int i = 0; i < count; i++)
{
int index = r.Next(count);
int swap = indexOrder[0];
indexOrder[0] = indexOrder[index];
indexOrder[index] = swap;
}
foreach (int i in indexOrder)
{
yield return list[i];
}
}
}
    // Keeps the machine awake while an instance is alive: the constructor requests
    // a "no sleep" execution state via the Win32 SetThreadExecutionState API, and
    // Dispose() restores whatever state was in effect before.
    public class SystemSleep : IDisposable
    {
        // Execution state returned by the first SetThreadExecutionState call,
        // restored on Dispose.
        private readonly EXECUTION_STATE _previousState;

        public SystemSleep()
        {
            // Prevent system sleep.
            _previousState = SetThreadExecutionState(
                EXECUTION_STATE.awaymode_required |
                EXECUTION_STATE.continuous |
                EXECUTION_STATE.system_required);
        }

        public void Dispose()
        {
            // Restore the prior execution state so the system may sleep again.
            SetThreadExecutionState(_previousState);
        }

        [DllImport("kernel32.dll", CharSet = CharSet.Auto, SetLastError = true)]
        private static extern EXECUTION_STATE SetThreadExecutionState(EXECUTION_STATE esFlags);

        // Flags accepted/returned by SetThreadExecutionState (subset used here).
        [Flags]
        private enum EXECUTION_STATE : uint
        {
            awaymode_required = 0x00000040,
            continuous = 0x80000000,
            system_required = 0x00000001
        }
    }
}
| 1 | 12,384 | Also not for merge to master. | ProteoWizard-pwiz | .cs |
@@ -201,6 +201,10 @@ class ScheduleDefinition:
raise DagsterInvalidDefinitionError(
f"Found invalid cron schedule '{self._cron_schedule}' for schedule '{name}''."
)
+ if len(self._cron_schedule.split(" ")) != 5:
+ raise DagsterInvalidDefinitionError(
+ f"Found non-standard cron schedule '{self._cron_schedule}' for schedule '{name}''."
+ )
if job is not None:
self._target: Union[DirectTarget, RepoRelativeTarget] = DirectTarget(job) | 1 | import copy
from contextlib import ExitStack
from datetime import datetime
from typing import TYPE_CHECKING, Any, Callable, Dict, List, NamedTuple, Optional, Union, cast
import pendulum
from croniter import croniter
from dagster import check
from dagster.seven import funcsigs
from ...serdes import whitelist_for_serdes
from ...utils import ensure_gen, merge_dicts
from ..decorator_utils import get_function_params
from ..errors import (
DagsterInvalidDefinitionError,
DagsterInvalidInvocationError,
DagsterInvariantViolationError,
ScheduleExecutionError,
user_code_error_boundary,
)
from ..instance import DagsterInstance
from ..instance.ref import InstanceRef
from ..storage.pipeline_run import PipelineRun
from ..storage.tags import check_tags
from .graph_definition import GraphDefinition
from .mode import DEFAULT_MODE_NAME
from .pipeline_definition import PipelineDefinition
from .run_request import InstigatorType, RunRequest, SkipReason
from .target import DirectTarget, RepoRelativeTarget
from .utils import check_valid_name
if TYPE_CHECKING:
from .decorators.schedule import DecoratedScheduleFunction
class ScheduleEvaluationContext:
    """Schedule-specific execution context.

    An instance of this class is made available as the first argument to various ScheduleDefinition
    functions. It is passed as the first argument to ``run_config_fn``, ``tags_fn``,
    and ``should_execute``.

    Attributes:
        instance_ref (Optional[InstanceRef]): The serialized instance configured to run the schedule
        scheduled_execution_time (datetime):
            The time in which the execution was scheduled to happen. May differ slightly
            from both the actual execution time and the time at which the run config is computed.
            Not available in all schedulers - currently only set in deployments using
            DagsterDaemonScheduler.
    """

    __slots__ = ["_instance_ref", "_scheduled_execution_time", "_exit_stack", "_instance"]

    def __init__(
        self, instance_ref: Optional[InstanceRef], scheduled_execution_time: Optional[datetime]
    ):
        # ExitStack owns the lazily-created DagsterInstance (see the `instance`
        # property) so it can be closed when this context exits.
        self._exit_stack = ExitStack()
        self._instance = None

        self._instance_ref = check.opt_inst_param(instance_ref, "instance_ref", InstanceRef)
        self._scheduled_execution_time = check.opt_inst_param(
            scheduled_execution_time, "scheduled_execution_time", datetime
        )

    def __enter__(self):
        return self

    def __exit__(self, _exception_type, _exception_value, _traceback):
        # Closes the lazily-created instance, if one was opened.
        self._exit_stack.close()

    @property
    def instance(self) -> "DagsterInstance":
        # self._instance_ref should only ever be None when this ScheduleEvaluationContext was
        # constructed under test.
        if not self._instance_ref:
            raise DagsterInvariantViolationError(
                "Attempted to initialize dagster instance, but no instance reference was provided."
            )
        if not self._instance:
            # Lazily create the instance on first access and register it for cleanup.
            self._instance = self._exit_stack.enter_context(
                DagsterInstance.from_ref(self._instance_ref)
            )
        return cast(DagsterInstance, self._instance)

    @property
    def scheduled_execution_time(self) -> Optional[datetime]:
        return self._scheduled_execution_time
# Preserve ScheduleExecutionContext for backcompat so type annotations don't break.
# Deprecated alias: new code should use ScheduleEvaluationContext directly.
ScheduleExecutionContext = ScheduleEvaluationContext
def build_schedule_context(
    instance: Optional[DagsterInstance] = None, scheduled_execution_time: Optional[datetime] = None
) -> ScheduleEvaluationContext:
    """Construct a ScheduleEvaluationContext for testing/evaluating a schedule.

    Only a persistent DagsterInstance can be carried on the context (it is stored
    by reference); an ephemeral instance results in a context with no instance ref.

    Args:
        instance (Optional[DagsterInstance]): The dagster instance configured to run the schedule.
        scheduled_execution_time (datetime): The time in which the execution was scheduled to
            happen. May differ slightly from both the actual execution time and the time at which
            the run config is computed.

    Examples:

        .. code-block:: python

            context = build_schedule_context(instance)
            daily_schedule.evaluate_tick(context)
    """
    check.opt_inst_param(instance, "instance", DagsterInstance)
    check.opt_inst_param(scheduled_execution_time, "scheduled_execution_time", datetime)

    instance_ref = None
    if instance is not None and instance.is_persistent:
        instance_ref = instance.get_ref()

    return ScheduleEvaluationContext(
        instance_ref=instance_ref,
        scheduled_execution_time=scheduled_execution_time,
    )
@whitelist_for_serdes
class ScheduleExecutionData(NamedTuple):
    """Serializable result of evaluating one schedule tick.

    Carries either the runs the schedule requested, or a human-readable message
    explaining why the tick produced no runs.
    """

    run_requests: Optional[List[RunRequest]]
    skip_message: Optional[str]
class ScheduleDefinition:
    """Define a schedule that targets a job

    Args:
        name (Optional[str]): The name of the schedule to create. Defaults to the job name plus
            "_schedule".
        cron_schedule (str): A valid cron string specifying when the schedule will run, e.g.,
            '45 23 * * 6' for a schedule that runs at 11:45 PM every Saturday.
        pipeline_name (Optional[str]): (legacy) The name of the pipeline to execute when the schedule runs.
        execution_fn (Callable[ScheduleEvaluationContext]): The core evaluation function for the
            schedule, which is run at an interval to determine whether a run should be launched or
            not. Takes a :py:class:`~dagster.ScheduleEvaluationContext`.

            This function must return a generator, which must yield either a single SkipReason
            or one or more RunRequest objects.
        run_config (Optional[Dict]): The config that parameterizes this execution,
            as a dict.
        run_config_fn (Optional[Callable[[ScheduleEvaluationContext], [Dict]]]): A function that
            takes a ScheduleEvaluationContext object and returns the run configuration that
            parameterizes this execution, as a dict. You may set only one of ``run_config``,
            ``run_config_fn``, and ``execution_fn``.
        tags (Optional[Dict[str, str]]): A dictionary of tags (string key-value pairs) to attach
            to the scheduled runs.
        tags_fn (Optional[Callable[[ScheduleEvaluationContext], Optional[Dict[str, str]]]]): A
            function that generates tags to attach to the schedules runs. Takes a
            :py:class:`~dagster.ScheduleEvaluationContext` and returns a dictionary of tags (string
            key-value pairs). You may set only one of ``tags``, ``tags_fn``, and ``execution_fn``.
        solid_selection (Optional[List[str]]): A list of solid subselection (including single
            solid names) to execute when the schedule runs. e.g. ``['*some_solid+', 'other_solid']``
        mode (Optional[str]): (legacy) The mode to apply when executing this schedule. (default: 'default')
        should_execute (Optional[Callable[[ScheduleEvaluationContext], bool]]): A function that runs
            at schedule execution time to determine whether a schedule should execute or skip. Takes
            a :py:class:`~dagster.ScheduleEvaluationContext` and returns a boolean (``True`` if the
            schedule should execute). Defaults to a function that always returns ``True``.
        environment_vars (Optional[dict[str, str]]): The environment variables to set for the
            schedule
        execution_timezone (Optional[str]): Timezone in which the schedule should run.
            Supported strings for timezones are the ones provided by the
            `IANA time zone database <https://www.iana.org/time-zones>` - e.g. "America/Los_Angeles".
        description (Optional[str]): A human-readable description of the schedule.
        job (Optional[Union[GraphDefinition, JobDefinition]]): The job that should execute when this
            schedule runs.
    """

    def __init__(
        self,
        name: Optional[str] = None,
        cron_schedule: Optional[str] = None,
        pipeline_name: Optional[str] = None,
        run_config: Optional[Any] = None,
        run_config_fn: Optional[Callable[..., Any]] = None,
        tags: Optional[Dict[str, str]] = None,
        tags_fn: Optional[Callable[..., Optional[Dict[str, str]]]] = None,
        solid_selection: Optional[List[Any]] = None,
        mode: Optional[str] = "default",
        should_execute: Optional[Callable[..., bool]] = None,
        environment_vars: Optional[Dict[str, str]] = None,
        execution_timezone: Optional[str] = None,
        execution_fn: Optional[
            Union[Callable[[ScheduleEvaluationContext], Any], "DecoratedScheduleFunction"]
        ] = None,
        description: Optional[str] = None,
        job: Optional[Union[GraphDefinition, PipelineDefinition]] = None,
    ):
        # Local import to avoid a circular dependency with the decorator module.
        from .decorators.schedule import DecoratedScheduleFunction

        self._cron_schedule = check.str_param(cron_schedule, "cron_schedule")
        if not croniter.is_valid(self._cron_schedule):
            raise DagsterInvalidDefinitionError(
                f"Found invalid cron schedule '{self._cron_schedule}' for schedule '{name}''."
            )

        # Target selection: a direct job reference takes precedence over the
        # legacy pipeline_name/mode/solid_selection addressing.
        if job is not None:
            self._target: Union[DirectTarget, RepoRelativeTarget] = DirectTarget(job)
        else:
            self._target = RepoRelativeTarget(
                pipeline_name=check.str_param(pipeline_name, "pipeline_name"),
                mode=check.opt_str_param(mode, "mode") or DEFAULT_MODE_NAME,
                solid_selection=check.opt_nullable_list_param(
                    solid_selection, "solid_selection", of_type=str
                ),
            )

        # Derive the schedule name from the target if not given explicitly.
        if name:
            self._name = check_valid_name(name)
        elif pipeline_name:
            self._name = pipeline_name + "_schedule"
        elif job:
            self._name = job.name + "_schedule"

        self._description = check.opt_str_param(description, "description")

        self._environment_vars = check.opt_dict_param(
            environment_vars, "environment_vars", key_type=str, value_type=str
        )
        self._execution_timezone = check.opt_str_param(execution_timezone, "execution_timezone")

        # Either a full execution_fn is supplied, or one is synthesized from the
        # individual run_config/tags/should_execute pieces — never both.
        if execution_fn and (run_config_fn or tags_fn or should_execute or tags or run_config):
            raise DagsterInvalidDefinitionError(
                "Attempted to provide both execution_fn and individual run_config/tags arguments "
                "to ScheduleDefinition. Must provide only one of the two."
            )
        elif execution_fn:
            self._execution_fn: Optional[
                Union[Callable[..., Any], DecoratedScheduleFunction]
            ] = None
            if isinstance(execution_fn, DecoratedScheduleFunction):
                self._execution_fn = execution_fn
            else:
                self._execution_fn = check.opt_callable_param(execution_fn, "execution_fn")
            self._run_config_fn = None
        else:
            if run_config_fn and run_config:
                raise DagsterInvalidDefinitionError(
                    "Attempted to provide both run_config_fn and run_config as arguments"
                    " to ScheduleDefinition. Must provide only one of the two."
                )
            self._run_config_fn = check.opt_callable_param(
                run_config_fn,
                "run_config_fn",
                default=lambda _context: check.opt_dict_param(run_config, "run_config"),
            )

            if tags_fn and tags:
                raise DagsterInvalidDefinitionError(
                    "Attempted to provide both tags_fn and tags as arguments"
                    " to ScheduleDefinition. Must provide only one of the two."
                )
            elif tags:
                check_tags(tags, "tags")
                tags_fn = lambda _context: tags
            else:
                tags_fn = check.opt_callable_param(tags_fn, "tags_fn", default=lambda _context: {})

            should_execute = check.opt_callable_param(
                should_execute, "should_execute", default=lambda _context: True
            )

            # Synthesized evaluation function: runs should_execute, then
            # run_config_fn, then tags_fn, wrapping each in a user-code error
            # boundary so failures are attributed to the right user function.
            def _execution_fn(context):
                with user_code_error_boundary(
                    ScheduleExecutionError,
                    lambda: f"Error occurred during the execution of should_execute for schedule {name}",
                ):
                    if not should_execute(context):
                        yield SkipReason(
                            "should_execute function for {schedule_name} returned false.".format(
                                schedule_name=name
                            )
                        )
                        return

                with user_code_error_boundary(
                    ScheduleExecutionError,
                    lambda: f"Error occurred during the execution of run_config_fn for schedule {name}",
                ):
                    # Deep-copy so later mutation of the returned config cannot
                    # leak between evaluations.
                    evaluated_run_config = copy.deepcopy(
                        self._run_config_fn(context)
                        if is_context_provided(get_function_params(self._run_config_fn))
                        else self._run_config_fn()
                    )

                with user_code_error_boundary(
                    ScheduleExecutionError,
                    lambda: f"Error occurred during the execution of tags_fn for schedule {name}",
                ):
                    evaluated_tags = tags_fn(context)

                yield RunRequest(
                    run_key=None,
                    run_config=evaluated_run_config,
                    tags=evaluated_tags,
                )

            self._execution_fn = _execution_fn

        if self._execution_timezone:
            try:
                # Verify that the timezone can be loaded
                pendulum.timezone(self._execution_timezone)
            except Exception:
                raise DagsterInvalidDefinitionError(
                    "Invalid execution timezone {timezone} for {schedule_name}".format(
                        schedule_name=name, timezone=self._execution_timezone
                    )
                )

    def __call__(self, *args, **kwargs):
        # Direct invocation is only supported for @schedule-decorated functions,
        # where the original decorated function can be recovered and called.
        from .decorators.schedule import DecoratedScheduleFunction

        if not isinstance(self._execution_fn, DecoratedScheduleFunction):
            raise DagsterInvalidInvocationError(
                "Schedule invocation is only supported for schedules created via the schedule "
                "decorators."
            )
        result = None
        if self._execution_fn.has_context_arg:
            if len(args) == 0 and len(kwargs) == 0:
                raise DagsterInvalidInvocationError(
                    "Schedule decorated function has context argument, but no context argument was "
                    "provided when invoking."
                )
            if len(args) + len(kwargs) > 1:
                raise DagsterInvalidInvocationError(
                    "Schedule invocation received multiple arguments. Only a first "
                    "positional context parameter should be provided when invoking."
                )

            # The context may be passed positionally or by its declared name.
            context_param_name = get_function_params(self._execution_fn.decorated_fn)[0].name

            if args:
                context = check.opt_inst_param(
                    args[0], context_param_name, ScheduleEvaluationContext
                )
            else:
                if context_param_name not in kwargs:
                    raise DagsterInvalidInvocationError(
                        f"Schedule invocation expected argument '{context_param_name}'."
                    )
                context = check.opt_inst_param(
                    kwargs[context_param_name], context_param_name, ScheduleEvaluationContext
                )

            context = context if context else build_schedule_context()

            result = self._execution_fn.decorated_fn(context)
        else:
            if len(args) + len(kwargs) > 0:
                raise DagsterInvalidInvocationError(
                    "Decorated schedule function takes no arguments, but arguments were provided."
                )
            result = self._execution_fn.decorated_fn()

        # Deep-copy dict results so callers cannot mutate shared state through
        # the returned config.
        if isinstance(result, dict):
            return copy.deepcopy(result)
        else:
            return result

    @property
    def name(self) -> str:
        return self._name

    @property
    def pipeline_name(self) -> str:
        return self._target.pipeline_name

    @property
    def job_type(self) -> InstigatorType:
        return InstigatorType.SCHEDULE

    @property
    def solid_selection(self) -> Optional[List[Any]]:
        return self._target.solid_selection

    @property
    def mode(self) -> str:
        return self._target.mode

    @property
    def description(self) -> Optional[str]:
        return self._description

    @property
    def cron_schedule(self) -> str:
        return self._cron_schedule

    @property
    def environment_vars(self) -> Dict[str, str]:
        return self._environment_vars

    @property
    def execution_timezone(self) -> Optional[str]:
        return self._execution_timezone

    def evaluate_tick(self, context: "ScheduleEvaluationContext") -> ScheduleExecutionData:
        """Evaluate schedule using the provided context.

        Args:
            context (ScheduleEvaluationContext): The context with which to evaluate this schedule.
        Returns:
            ScheduleExecutionData: Contains list of run requests, or skip message if present.
        """
        from .decorators.schedule import DecoratedScheduleFunction

        check.inst_param(context, "context", ScheduleEvaluationContext)

        if isinstance(self._execution_fn, DecoratedScheduleFunction):
            execution_fn = self._execution_fn.wrapped_fn
        else:
            execution_fn = cast(Callable[[ScheduleEvaluationContext], Any], self._execution_fn)

        # The execution function may be a generator; collect everything it yields.
        result = list(ensure_gen(execution_fn(context)))

        if not result or result == [None]:
            run_requests = []
            skip_message = None
        elif len(result) == 1:
            item = result[0]
            check.inst(item, (SkipReason, RunRequest))
            run_requests = [item] if isinstance(item, RunRequest) else []
            skip_message = item.skip_message if isinstance(item, SkipReason) else None
        else:
            # Multiple yields: all must be RunRequests, each with a run_key.
            check.is_list(result, of_type=RunRequest)
            check.invariant(
                not any(not request.run_key for request in result),
                "Schedules that return multiple RunRequests must specify a run_key in each RunRequest",
            )
            run_requests = result
            skip_message = None

        # clone all the run requests with the required schedule tags
        run_requests_with_schedule_tags = [
            RunRequest(
                run_key=request.run_key,
                run_config=request.run_config,
                tags=merge_dicts(request.tags, PipelineRun.tags_for_schedule(self)),
            )
            for request in run_requests
        ]

        return ScheduleExecutionData(
            run_requests=run_requests_with_schedule_tags, skip_message=skip_message
        )

    def has_loadable_target(self):
        # True when the schedule holds a direct reference to its job/pipeline.
        return isinstance(self._target, DirectTarget)

    def load_target(self):
        if isinstance(self._target, DirectTarget):
            return self._target.load()

        check.failed("Target is not loadable")
def is_context_provided(params: "List[funcsigs.Parameter]") -> bool:
    """Return True when the function signature takes exactly one (context) parameter."""
    return len(params) == 1
| 1 | 17,849 | nit `non-standard` might be a bit subjective - maybe "Cron schedule {blah} is in an unrecognized format. Dagster cron strings must consist of five characters separated by a single space chacater". | dagster-io-dagster | py |
@@ -163,7 +163,6 @@ public class ServerPickerActivityTest {
openCustomEditDialog();
}
setText(com.salesforce.androidsdk.R.id.sf__picker_custom_label, label);
- clickView(com.salesforce.androidsdk.R.id.sf__picker_custom_url);
setText(com.salesforce.androidsdk.R.id.sf__picker_custom_url, url);
}
| 1 | /*
* Copyright (c) 2011-present, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.samples.restexplorer;
import android.app.Fragment;
import android.app.FragmentManager;
import android.app.FragmentTransaction;
import android.support.test.filters.SmallTest;
import android.support.test.rule.ActivityTestRule;
import android.support.test.runner.AndroidJUnit4;
import android.view.View;
import android.widget.EditText;
import com.salesforce.androidsdk.app.SalesforceSDKManager;
import com.salesforce.androidsdk.ui.CustomServerUrlEditor;
import com.salesforce.androidsdk.ui.ServerPickerActivity;
import com.salesforce.androidsdk.util.EventsObservable.EventType;
import com.salesforce.androidsdk.util.test.EventsListenerQueue;
import junit.framework.Assert;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.runner.RunWith;
import static android.support.test.espresso.Espresso.onView;
import static android.support.test.espresso.action.ViewActions.click;
import static android.support.test.espresso.action.ViewActions.closeSoftKeyboard;
import static android.support.test.espresso.action.ViewActions.replaceText;
import static android.support.test.espresso.matcher.ViewMatchers.withId;
/**
* Tests for ServerPickerActivity.
*/
@RunWith(AndroidJUnit4.class)
@SmallTest
public class ServerPickerActivityTest {
private EventsListenerQueue eq;
private ServerPickerActivity activity;
@Rule
public ActivityTestRule<ServerPickerActivity> serverPickerActivityTestRule = new ActivityTestRule<>(ServerPickerActivity.class);
@Before
public void setUp() throws Exception {
eq = new EventsListenerQueue();
// Waits for app initialization to complete.
if (!SalesforceSDKManager.hasInstance()) {
eq.waitForEvent(EventType.AppCreateComplete, 5000);
}
activity = serverPickerActivityTestRule.getActivity();
removeFragmentIfRequired();
Assert.assertNotNull("Activity should not be null", activity);
}
@After
public void tearDown() throws Exception {
if (eq != null) {
eq.tearDown();
eq = null;
}
activity.finish();
activity = null;
}
/**
* Test that the cancel button can be clicked and the URL not saved.
*
* @throws Throwable
*/
@Test
public void testCancelButton() throws Throwable {
openCustomEditDialog();
clickView(com.salesforce.androidsdk.R.id.sf__cancel_button);
Assert.assertNull("Custom URL dialog should be closed",
activity.getCustomServerUrlEditor().getDialog());
}
/**
* Test a valid URL can be entered and saved.
*
* @throws Throwable
*/
@Test
public void testAddCustomInstance() throws Throwable {
String label = "My Custom URL";
String url = "https://valid.url.com";
addCustomUrl(label, url);
clickView(com.salesforce.androidsdk.R.id.sf__apply_button);
openCustomEditDialog();
final CustomServerUrlEditor dialog = activity.getCustomServerUrlEditor();
Thread.sleep(3000);
final View rootView = dialog.getRootView();
final EditText txtLabel = rootView.findViewById(com.salesforce.androidsdk.R.id.sf__picker_custom_label);
final EditText txtUrl = rootView.findViewById(com.salesforce.androidsdk.R.id.sf__picker_custom_url);
Assert.assertTrue("Custom Label does not match Expected: " + label
+ " Actual: " + txtLabel.getEditableText().toString(), label
.equalsIgnoreCase(txtLabel.getEditableText().toString()));
Assert.assertTrue("Custom URL does not match Expected: " + url + " Actual: "
+ txtUrl.getEditableText().toString(), url
.equalsIgnoreCase(txtUrl.getEditableText().toString()));
}
/**
* Test that "https" is required.
*
* @throws Throwable
*/
@Test
public void testAddInvalidUrl() throws Throwable {
String label = "My URL";
String url = "http://invalid.url.com";
addCustomUrl(label, url);
clickView(com.salesforce.androidsdk.R.id.sf__apply_button);
Assert.assertTrue("Custom URL dialog should still be open",
activity.getCustomServerUrlEditor().getDialog().isShowing());
url = "https://valid.url.com";
addCustomUrl(label, url);
clickView(com.salesforce.androidsdk.R.id.sf__apply_button);
Assert.assertNull("Custom URL dialog should be closed",
activity.getCustomServerUrlEditor().getDialog());
}
private void openCustomEditDialog() throws Throwable {
clickView(com.salesforce.androidsdk.R.id.sf__show_custom_url_edit);
final CustomServerUrlEditor dialog = activity.getCustomServerUrlEditor();
Thread.sleep(3000);
final View rootView = dialog.getRootView();
Assert.assertNotNull("Root view should not be null", rootView);
clickView(com.salesforce.androidsdk.R.id.sf__picker_custom_label);
}
private void addCustomUrl(String label, String url) throws Throwable {
if (!activity.getCustomServerUrlEditor().isVisible()) {
openCustomEditDialog();
}
setText(com.salesforce.androidsdk.R.id.sf__picker_custom_label, label);
clickView(com.salesforce.androidsdk.R.id.sf__picker_custom_url);
setText(com.salesforce.androidsdk.R.id.sf__picker_custom_url, url);
}
private void removeFragmentIfRequired() {
final FragmentManager fm = activity.getFragmentManager();
final Fragment dialog = activity.getFragmentManager().findFragmentByTag("custom_server_dialog");
if (dialog != null && dialog.isAdded()) {
final FragmentTransaction ft = fm.beginTransaction();
ft.remove(dialog);
ft.commit();
}
}
private void setText(final int viewId, final String text) {
try {
onView(withId(viewId)).perform(replaceText(text), closeSoftKeyboard());
} catch (Throwable t) {
Assert.fail("Failed to set text " + text);
}
}
private void clickView(final int resId) {
try {
onView(withId(resId)).perform(click());
} catch (Throwable t) {
Assert.fail("Failed to click view " + resId);
}
}
}
| 1 | 16,718 | ARM emulator gets tripped up on the copy/paste menu when tapping the text. Tapping on the text field isn't necessary for setting text. | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -68,11 +68,12 @@ public class SpellChecker {
}
char[] wordChars = word.toCharArray();
- if (dictionary.isForbiddenWord(wordChars, wordChars.length)) {
- return false;
+ Boolean simpleResult = checkSimpleWord(wordChars, wordChars.length, null);
+ if (simpleResult != null) {
+ return simpleResult;
}
- if (checkWord(wordChars, wordChars.length, null)) {
+ if (checkCompounds(wordChars, wordChars.length, null)) {
return true;
}
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.analysis.hunspell;
import static org.apache.lucene.analysis.hunspell.Dictionary.FLAG_UNSET;
import static org.apache.lucene.analysis.hunspell.WordContext.COMPOUND_BEGIN;
import static org.apache.lucene.analysis.hunspell.WordContext.COMPOUND_END;
import static org.apache.lucene.analysis.hunspell.WordContext.COMPOUND_MIDDLE;
import static org.apache.lucene.analysis.hunspell.WordContext.SIMPLE_WORD;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Locale;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.lucene.util.CharsRef;
import org.apache.lucene.util.IntsRef;
/**
* A spell checker based on Hunspell dictionaries. The objects of this class are not thread-safe
* (but a single underlying Dictionary can be shared by multiple spell-checkers in different
* threads). Not all Hunspell features are supported yet.
*/
public class SpellChecker {
final Dictionary dictionary;
final Stemmer stemmer;
public SpellChecker(Dictionary dictionary) {
this.dictionary = dictionary;
stemmer = new Stemmer(dictionary);
}
/** @return whether the given word's spelling is considered correct according to Hunspell rules */
public boolean spell(String word) {
if (word.isEmpty()) return true;
if (dictionary.needsInputCleaning) {
word = dictionary.cleanInput(word, new StringBuilder()).toString();
}
if (word.endsWith(".")) {
return spellWithTrailingDots(word);
}
return spellClean(word);
}
private boolean spellClean(String word) {
if (isNumber(word)) {
return true;
}
char[] wordChars = word.toCharArray();
if (dictionary.isForbiddenWord(wordChars, wordChars.length)) {
return false;
}
if (checkWord(wordChars, wordChars.length, null)) {
return true;
}
WordCase wc = stemmer.caseOf(wordChars, wordChars.length);
if ((wc == WordCase.UPPER || wc == WordCase.TITLE)) {
Stemmer.CaseVariationProcessor variationProcessor =
(variant, varLength, originalCase) -> !checkWord(variant, varLength, originalCase);
if (!stemmer.varyCase(wordChars, wordChars.length, wc, variationProcessor)) {
return true;
}
}
if (dictionary.breaks.isNotEmpty() && !hasTooManyBreakOccurrences(word)) {
return tryBreaks(word);
}
return false;
}
private boolean spellWithTrailingDots(String word) {
int length = word.length() - 1;
while (length > 0 && word.charAt(length - 1) == '.') {
length--;
}
return spellClean(word.substring(0, length)) || spellClean(word.substring(0, length + 1));
}
boolean checkWord(String word) {
return checkWord(word.toCharArray(), word.length(), null);
}
Boolean checkSimpleWord(char[] wordChars, int length, WordCase originalCase) {
if (dictionary.isForbiddenWord(wordChars, length)) {
return false;
}
if (findStem(wordChars, 0, length, originalCase, SIMPLE_WORD) != null) {
return true;
}
return null;
}
private boolean checkWord(char[] wordChars, int length, WordCase originalCase) {
Boolean simpleResult = checkSimpleWord(wordChars, length, originalCase);
if (simpleResult != null) {
return simpleResult;
}
if (dictionary.compoundRules != null
&& checkCompoundRules(wordChars, 0, length, new ArrayList<>())) {
return true;
}
if (dictionary.compoundBegin != FLAG_UNSET || dictionary.compoundFlag != FLAG_UNSET) {
return checkCompounds(new CharsRef(wordChars, 0, length), originalCase, null);
}
return false;
}
private CharsRef findStem(
char[] wordChars, int offset, int length, WordCase originalCase, WordContext context) {
CharsRef[] result = {null};
stemmer.doStem(
wordChars,
offset,
length,
originalCase,
context,
(stem, formID, stemException) -> {
if (acceptsStem(formID)) {
result[0] = stem;
}
return false;
});
return result[0];
}
boolean acceptsStem(int formID) {
return true;
}
private boolean checkCompounds(CharsRef word, WordCase originalCase, CompoundPart prev) {
if (prev != null && prev.index > dictionary.compoundMax - 2) return false;
int limit = word.length - dictionary.compoundMin + 1;
for (int breakPos = dictionary.compoundMin; breakPos < limit; breakPos++) {
WordContext context = prev == null ? COMPOUND_BEGIN : COMPOUND_MIDDLE;
int breakOffset = word.offset + breakPos;
if (mayBreakIntoCompounds(word.chars, word.offset, word.length, breakOffset)) {
CharsRef stem = findStem(word.chars, word.offset, breakPos, originalCase, context);
if (stem == null
&& dictionary.simplifiedTriple
&& word.chars[breakOffset - 1] == word.chars[breakOffset]) {
stem = findStem(word.chars, word.offset, breakPos + 1, originalCase, context);
}
if (stem != null && (prev == null || prev.mayCompound(stem, breakPos, originalCase))) {
CompoundPart part = new CompoundPart(prev, word, breakPos, stem, null);
if (checkCompoundsAfter(originalCase, part)) {
return true;
}
}
}
if (checkCompoundPatternReplacements(word, breakPos, originalCase, prev)) {
return true;
}
}
return false;
}
private boolean checkCompoundPatternReplacements(
CharsRef word, int pos, WordCase originalCase, CompoundPart prev) {
for (CheckCompoundPattern pattern : dictionary.checkCompoundPatterns) {
CharsRef expanded = pattern.expandReplacement(word, pos);
if (expanded != null) {
WordContext context = prev == null ? COMPOUND_BEGIN : COMPOUND_MIDDLE;
int breakPos = pos + pattern.endLength();
CharsRef stem = findStem(expanded.chars, expanded.offset, breakPos, originalCase, context);
if (stem != null) {
CompoundPart part = new CompoundPart(prev, expanded, breakPos, stem, pattern);
if (checkCompoundsAfter(originalCase, part)) {
return true;
}
}
}
}
return false;
}
private boolean checkCompoundsAfter(WordCase originalCase, CompoundPart prev) {
CharsRef word = prev.tail;
int breakPos = prev.length;
int remainingLength = word.length - breakPos;
int breakOffset = word.offset + breakPos;
CharsRef tailStem =
findStem(word.chars, breakOffset, remainingLength, originalCase, COMPOUND_END);
if (tailStem != null
&& !(dictionary.checkCompoundDup && equalsIgnoreCase(prev.stem, tailStem))
&& !hasForceUCaseProblem(word.chars, breakOffset, remainingLength, originalCase)
&& prev.mayCompound(tailStem, remainingLength, originalCase)) {
return true;
}
CharsRef tail = new CharsRef(word.chars, breakOffset, remainingLength);
return checkCompounds(tail, originalCase, prev);
}
private boolean hasForceUCaseProblem(
char[] chars, int offset, int length, WordCase originalCase) {
if (dictionary.forceUCase == FLAG_UNSET) return false;
if (originalCase == WordCase.TITLE || originalCase == WordCase.UPPER) return false;
IntsRef forms = dictionary.lookupWord(chars, offset, length);
return forms != null && dictionary.hasFlag(forms, dictionary.forceUCase);
}
private boolean equalsIgnoreCase(CharsRef cr1, CharsRef cr2) {
return cr1.toString().equalsIgnoreCase(cr2.toString());
}
private class CompoundPart {
final CompoundPart prev;
final int index, length;
final CharsRef tail, stem;
final CheckCompoundPattern enablingPattern;
CompoundPart(
CompoundPart prev, CharsRef tail, int length, CharsRef stem, CheckCompoundPattern enabler) {
this.prev = prev;
this.tail = tail;
this.length = length;
this.stem = stem;
index = prev == null ? 1 : prev.index + 1;
enablingPattern = enabler;
}
@Override
public String toString() {
return (prev == null ? "" : prev + "+") + tail.subSequence(0, length);
}
boolean mayCompound(CharsRef nextStem, int nextPartLength, WordCase originalCase) {
boolean patternsOk =
enablingPattern != null
? enablingPattern.prohibitsCompounding(tail, length, stem, nextStem)
: dictionary.checkCompoundPatterns.stream()
.noneMatch(p -> p.prohibitsCompounding(tail, length, stem, nextStem));
if (!patternsOk) {
return false;
}
if (dictionary.checkCompoundRep
&& isMisspelledSimpleWord(length + nextPartLength, originalCase)) {
return false;
}
String spaceSeparated =
new String(tail.chars, tail.offset, length)
+ " "
+ new String(tail.chars, tail.offset + length, nextPartLength);
return !checkWord(spaceSeparated);
}
private boolean isMisspelledSimpleWord(int length, WordCase originalCase) {
String word = new String(tail.chars, tail.offset, length);
for (RepEntry entry : dictionary.repTable) {
if (entry.isMiddle()) {
for (String sug : entry.substitute(word)) {
if (findStem(sug.toCharArray(), 0, sug.length(), originalCase, SIMPLE_WORD) != null) {
return true;
}
}
}
}
return false;
}
}
private boolean mayBreakIntoCompounds(char[] chars, int offset, int length, int breakPos) {
if (dictionary.checkCompoundCase) {
if (Character.isUpperCase(chars[breakPos - 1]) || Character.isUpperCase(chars[breakPos])) {
return false;
}
}
if (dictionary.checkCompoundTriple && chars[breakPos - 1] == chars[breakPos]) {
//noinspection RedundantIfStatement
if (breakPos > offset + 1 && chars[breakPos - 2] == chars[breakPos - 1]
|| breakPos < length - 1 && chars[breakPos] == chars[breakPos + 1]) {
return false;
}
}
return true;
}
private boolean checkCompoundRules(
char[] wordChars, int offset, int length, List<IntsRef> words) {
if (words.size() >= 100) return false;
int limit = length - dictionary.compoundMin + 1;
for (int breakPos = dictionary.compoundMin; breakPos < limit; breakPos++) {
IntsRef forms = dictionary.lookupWord(wordChars, offset, breakPos);
if (forms != null) {
words.add(forms);
if (dictionary.compoundRules != null
&& dictionary.compoundRules.stream().anyMatch(r -> r.mayMatch(words))) {
if (checkLastCompoundPart(wordChars, offset + breakPos, length - breakPos, words)) {
return true;
}
if (checkCompoundRules(wordChars, offset + breakPos, length - breakPos, words)) {
return true;
}
}
words.remove(words.size() - 1);
}
}
return false;
}
private boolean checkLastCompoundPart(
char[] wordChars, int start, int length, List<IntsRef> words) {
IntsRef forms = dictionary.lookupWord(wordChars, start, length);
if (forms == null) return false;
words.add(forms);
boolean result = dictionary.compoundRules.stream().anyMatch(r -> r.fullyMatches(words));
words.remove(words.size() - 1);
return result;
}
private static boolean isNumber(String s) {
int i = 0;
while (i < s.length()) {
char c = s.charAt(i);
if (isDigit(c)) {
i++;
} else if (c == '.' || c == ',' || c == '-') {
if (i == 0 || i >= s.length() - 1 || !isDigit(s.charAt(i + 1))) {
return false;
}
i += 2;
} else {
return false;
}
}
return true;
}
private static boolean isDigit(char c) {
return c >= '0' && c <= '9';
}
private boolean tryBreaks(String word) {
for (String br : dictionary.breaks.starting) {
if (word.length() > br.length() && word.startsWith(br)) {
if (spell(word.substring(br.length()))) {
return true;
}
}
}
for (String br : dictionary.breaks.ending) {
if (word.length() > br.length() && word.endsWith(br)) {
if (spell(word.substring(0, word.length() - br.length()))) {
return true;
}
}
}
for (String br : dictionary.breaks.middle) {
int pos = word.indexOf(br);
if (canBeBrokenAt(word, br, pos)) {
return true;
}
// try to break at the second occurrence
// to recognize dictionary words with a word break
if (pos > 0 && canBeBrokenAt(word, br, word.indexOf(br, pos + 1))) {
return true;
}
}
return false;
}
private boolean hasTooManyBreakOccurrences(String word) {
int occurrences = 0;
for (String br : dictionary.breaks.middle) {
int pos = 0;
while ((pos = word.indexOf(br, pos)) >= 0) {
if (++occurrences >= 10) return true;
pos += br.length();
}
}
return false;
}
private boolean canBeBrokenAt(String word, String breakStr, int breakPos) {
return breakPos > 0
&& breakPos < word.length() - breakStr.length()
&& spell(word.substring(0, breakPos))
&& spell(word.substring(breakPos + breakStr.length()));
}
public List<String> suggest(String word) {
if (word.length() >= 100) return Collections.emptyList();
if (dictionary.needsInputCleaning) {
word = dictionary.cleanInput(word, new StringBuilder()).toString();
}
WordCase wordCase = WordCase.caseOf(word);
SpellChecker suggestionSpeller =
new SpellChecker(dictionary) {
@Override
boolean acceptsStem(int formID) {
return !dictionary.hasFlag(formID, dictionary.noSuggest)
&& !dictionary.hasFlag(formID, dictionary.subStandard);
}
};
ModifyingSuggester modifier = new ModifyingSuggester(suggestionSpeller);
Set<String> suggestions = modifier.suggest(word, wordCase);
if (!modifier.hasGoodSuggestions && dictionary.maxNGramSuggestions > 0) {
suggestions.addAll(
new GeneratingSuggester(suggestionSpeller)
.suggest(dictionary.toLowerCase(word), wordCase, suggestions));
}
if (word.contains("-") && suggestions.stream().noneMatch(s -> s.contains("-"))) {
suggestions.addAll(modifyChunksBetweenDashes(word));
}
Set<String> result = new LinkedHashSet<>();
for (String candidate : suggestions) {
result.add(adjustSuggestionCase(candidate, wordCase, word));
if (wordCase == WordCase.UPPER && dictionary.checkSharpS && candidate.contains("ß")) {
result.add(candidate);
}
}
return result.stream().map(this::cleanOutput).collect(Collectors.toList());
}
private String adjustSuggestionCase(String candidate, WordCase originalCase, String original) {
if (originalCase == WordCase.UPPER) {
String upper = candidate.toUpperCase(Locale.ROOT);
if (upper.contains(" ") || spell(upper)) {
return upper;
}
}
if (Character.isUpperCase(original.charAt(0))) {
String title = Character.toUpperCase(candidate.charAt(0)) + candidate.substring(1);
if (title.contains(" ") || spell(title)) {
return title;
}
}
return candidate;
}
private List<String> modifyChunksBetweenDashes(String word) {
List<String> result = new ArrayList<>();
int chunkStart = 0;
while (chunkStart < word.length()) {
int chunkEnd = word.indexOf('-', chunkStart);
if (chunkEnd < 0) {
chunkEnd = word.length();
}
if (chunkEnd > chunkStart) {
String chunk = word.substring(chunkStart, chunkEnd);
if (!spell(chunk)) {
for (String chunkSug : suggest(chunk)) {
String replaced = word.substring(0, chunkStart) + chunkSug + word.substring(chunkEnd);
if (!dictionary.isForbiddenWord(replaced.toCharArray(), replaced.length())) {
result.add(replaced);
}
}
}
}
chunkStart = chunkEnd + 1;
}
return result;
}
private String cleanOutput(String s) {
if (!dictionary.needsOutputCleaning) return s;
try {
StringBuilder sb = new StringBuilder(s);
Dictionary.applyMappings(dictionary.oconv, sb);
return sb.toString();
} catch (IOException bogus) {
throw new RuntimeException(bogus);
}
}
}
| 1 | 40,284 | extra word lookup removed | apache-lucene-solr | java |
@@ -520,6 +520,12 @@ webdriver.WebDriver.prototype.call = function(fn, opt_scope, var_args) {
* Schedules a command to wait for a condition to hold, as defined by some
* user supplied function. If any errors occur while evaluating the wait, they
* will be allowed to propagate.
+ *
+ * <p>In the event a condition returns a Promise, the polling loop will wait
+ * for it to be resolved and use the resolved value for evaluating whether the
+ * condition has been satisfied. The resolution time for a promise is factored
+ * into whether a wait has timed out.
+ *
* @param {function():boolean} fn The function to evaluate as a wait condition.
* @param {number} timeout How long to wait for the condition to be true.
* @param {string=} opt_message An optional message to use if the wait times | 1 | // Copyright 2011 Software Freedom Conservancy. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @fileoverview The heart of the WebDriver JavaScript API.
*/
goog.provide('webdriver.Alert');
goog.provide('webdriver.UnhandledAlertError');
goog.provide('webdriver.WebDriver');
goog.provide('webdriver.WebElement');
goog.require('bot.Error');
goog.require('bot.ErrorCode');
goog.require('bot.response');
goog.require('goog.array');
goog.require('goog.object');
goog.require('webdriver.ActionSequence');
goog.require('webdriver.Command');
goog.require('webdriver.CommandName');
goog.require('webdriver.Key');
goog.require('webdriver.Locator');
goog.require('webdriver.Session');
goog.require('webdriver.logging');
goog.require('webdriver.promise');
//////////////////////////////////////////////////////////////////////////////
//
// webdriver.WebDriver
//
//////////////////////////////////////////////////////////////////////////////
/**
 * A WebDriver client, providing automated control over a browser session.
 *
 * Each command issued through this client yields a
 * {@code webdriver.promise.Promise} for that command's eventual result.
 * Callbacks registered on such a promise run as sub-commands and execute
 * before the next top-level command in the current frame:
 * <pre><code>
 * var message = [];
 * driver.call(message.push, message, 'a').then(function() {
 *   driver.call(message.push, message, 'b');
 * });
 * driver.call(message.push, message, 'c');
 * driver.call(function() {
 *   alert('message is abc? ' + (message.join('') == 'abc'));
 * });
 * </code></pre>
 *
 * @param {!(webdriver.Session|webdriver.promise.Promise)} session Either a
 *     known session or a promise that will be resolved to a session.
 * @param {!webdriver.CommandExecutor} executor The executor to use when
 *     sending commands to the browser.
 * @param {webdriver.promise.ControlFlow=} opt_flow The flow to
 *     schedule commands through. Defaults to the active flow object.
 * @constructor
 */
webdriver.WebDriver = function(session, executor, opt_flow) {
  /** @private {!webdriver.promise.ControlFlow} */
  this.flow_ = opt_flow || webdriver.promise.controlFlow();

  /** @private {!webdriver.CommandExecutor} */
  this.executor_ = executor;

  /** @private {!(webdriver.Session|webdriver.promise.Promise)} */
  this.session_ = session;
};
/**
 * Creates a new WebDriver client for an existing session.
 * @param {!webdriver.CommandExecutor} executor Command executor to use when
 *     querying for session details.
 * @param {string} sessionId ID of the session to attach to.
 * @return {!webdriver.WebDriver} A new client for the specified session.
 */
webdriver.WebDriver.attachToSession = function(executor, sessionId) {
  // Describe the existing session rather than creating a new one.
  var command = new webdriver.Command(webdriver.CommandName.DESCRIBE_SESSION).
      setParameter('sessionId', sessionId);
  return webdriver.WebDriver.acquireSession_(
      executor, command, 'WebDriver.attachToSession()');
};
/**
 * Creates a new WebDriver session.
 * @param {!webdriver.CommandExecutor} executor The executor to create the new
 *     session with.
 * @param {!webdriver.Capabilities} desiredCapabilities The desired
 *     capabilities for the new session.
 * @return {!webdriver.WebDriver} The driver for the newly created session.
 */
webdriver.WebDriver.createSession = function(executor, desiredCapabilities) {
  // Ask the remote end to start a brand new session with these capabilities.
  var command = new webdriver.Command(webdriver.CommandName.NEW_SESSION).
      setParameter('desiredCapabilities', desiredCapabilities);
  return webdriver.WebDriver.acquireSession_(
      executor, command, 'WebDriver.createSession()');
};
/**
 * Sends a command to the server that is expected to return the details for a
 * {@link webdriver.Session}. This may either be an existing session, or a
 * newly created one.
 * @param {!webdriver.CommandExecutor} executor Command executor to use when
 *     querying for session details.
 * @param {!webdriver.Command} command The command to send to fetch the session
 *     details.
 * @param {string} description A descriptive debug label for this action.
 * @return {!webdriver.WebDriver} A new WebDriver client for the session.
 * @private
 */
webdriver.WebDriver.acquireSession_ = function(executor, command, description) {
  var flow = webdriver.promise.controlFlow();

  // The session is resolved asynchronously; the returned driver holds the
  // promise and every scheduled command will wait on it.
  var session = flow.execute(function() {
    var result = webdriver.WebDriver.executeCommand_(executor, command);
    return result.then(function(response) {
      bot.response.checkResponse(response);
      return new webdriver.Session(
          response['sessionId'], response['value']);
    });
  }, description);

  return new webdriver.WebDriver(session, executor);
};
/**
 * Converts an object to its JSON representation in the WebDriver wire protocol.
 * When converting values of type object, the following steps will be taken:
 * <ol>
 * <li>if the object provides a "toWireValue" function, the return value will
 *     be returned in its fully resolved state (e.g. this function may return
 *     promise values)</li>
 * <li>if the object provides a "toJSON" function, the return value of this
 *     function will be returned</li>
 * <li>otherwise, the value of each key will be recursively converted according
 *     to the rules above.</li>
 * </ol>
 *
 * @param {*} obj The object to convert.
 * @return {!webdriver.promise.Promise} A promise that will resolve to the
 *     input value's JSON representation.
 * @private
 * @see http://code.google.com/p/selenium/wiki/JsonWireProtocol
 */
webdriver.WebDriver.toWireValue_ = function(obj) {
  var type = goog.typeOf(obj);

  if (type == 'array') {
    // Convert each element recursively; resolution waits for all of them.
    return webdriver.promise.fullyResolved(
        goog.array.map(/** @type {!Array} */ (obj),
            webdriver.WebDriver.toWireValue_));
  }

  if (type == 'object') {
    if (goog.isFunction(obj.toWireValue)) {
      return webdriver.promise.fullyResolved(obj.toWireValue());
    }
    if (goog.isFunction(obj.toJSON)) {
      return webdriver.promise.fulfilled(obj.toJSON());
    }
    // Raw DOM nodes cannot be serialized over the wire.
    if (goog.isNumber(obj.nodeType) && goog.isString(obj.nodeName)) {
      throw Error([
        'Invalid argument type: ', obj.nodeName, '(', obj.nodeType, ')'
      ].join(''));
    }
    // Plain object: convert each value recursively.
    return webdriver.promise.fullyResolved(
        goog.object.map(/** @type {!Object} */ (obj),
            webdriver.WebDriver.toWireValue_));
  }

  if (type == 'function') {
    // Functions are sent as their source text.
    return webdriver.promise.fulfilled('' + obj);
  }

  if (type == 'undefined') {
    return webdriver.promise.fulfilled(null);
  }

  // Primitives (string, number, boolean, null) pass through unchanged.
  return webdriver.promise.fulfilled(obj);
};
/**
 * Converts a value from its JSON representation according to the WebDriver wire
 * protocol. Any JSON object containing a
 * {@code webdriver.WebElement.ELEMENT_KEY} key will be decoded to a
 * {@code webdriver.WebElement} object. All other values will be passed through
 * as is.
 * @param {!webdriver.WebDriver} driver The driver instance to use as the
 *     parent of any unwrapped {@code webdriver.WebElement} values.
 * @param {*} value The value to convert.
 * @return {*} The converted value.
 * @see http://code.google.com/p/selenium/wiki/JsonWireProtocol
 * @private
 */
webdriver.WebDriver.fromWireValue_ = function(driver, value) {
  // Recursive converter bound to this driver for nested structures.
  var convert = goog.partial(webdriver.WebDriver.fromWireValue_, driver);

  if (goog.isArray(value)) {
    return goog.array.map(/**@type {goog.array.ArrayLike}*/ (value), convert);
  }

  if (value && goog.isObject(value) && !goog.isFunction(value)) {
    if (webdriver.WebElement.ELEMENT_KEY in value) {
      // {ELEMENT: id} records become live WebElement handles.
      return new webdriver.WebElement(driver,
          value[webdriver.WebElement.ELEMENT_KEY]);
    }
    return goog.object.map(/**@type {!Object}*/ (value), convert);
  }

  // Primitives and functions pass through unchanged.
  return value;
};
/**
 * Translates a command to its wire-protocol representation before passing it
 * to the given {@code executor} for execution.
 * @param {!webdriver.CommandExecutor} executor The executor to use.
 * @param {!webdriver.Command} command The command to execute.
 * @return {!webdriver.promise.Promise} A promise that will resolve with the
 *     command response.
 * @private
 */
webdriver.WebDriver.executeCommand_ = function(executor, command) {
  // Resolve any promised parameter values before serializing them.
  var resolvedParameters = webdriver.promise.fullyResolved(
      command.getParameters());
  return resolvedParameters.
      then(webdriver.WebDriver.toWireValue_).
      then(function(parameters) {
        command.setParameters(parameters);
        // The executor uses a node-style callback; adapt it to a promise.
        var execute = goog.bind(executor.execute, executor, command);
        return webdriver.promise.checkedNodeCall(execute);
      });
};
/**
 * @return {!webdriver.promise.ControlFlow} The control flow this instance
 *     uses to schedule and sequence its commands.
 */
webdriver.WebDriver.prototype.controlFlow = function() {
  return this.flow_;
};
/**
 * Schedules a {@code webdriver.Command} to be executed by this driver's
 * {@code webdriver.CommandExecutor}.
 * @param {!webdriver.Command} command The command to schedule.
 * @param {string} description A description of the command for debugging.
 * @return {!webdriver.promise.Promise} A promise that will be resolved with
 *     the command result.
 */
webdriver.WebDriver.prototype.schedule = function(command, description) {
  var self = this;

  // Fail fast if quit() has already completed and cleared the session.
  checkHasNotQuit();
  command.setParameter('sessionId', this.session_);

  var flow = this.flow_;
  return flow.execute(function() {
    // A call to WebDriver.quit() may have been scheduled in the same event
    // loop as this |command|, which would prevent us from detecting that the
    // driver has quit above. Therefore, we need to make another quick check.
    // We still check above so we can fail as early as possible.
    checkHasNotQuit();
    return webdriver.WebDriver.executeCommand_(self.executor_, command);
  }, description).then(function(response) {
    try {
      // Throws a bot.Error if the wire response signals a failure.
      bot.response.checkResponse(response);
    } catch (ex) {
      var value = response['value'];
      if (ex.code === bot.ErrorCode.MODAL_DIALOG_OPENED) {
        // A blocking alert prevented the command; re-throw with a handle the
        // caller can use to inspect or dismiss the alert.
        var text = value && value['alert'] ? value['alert']['text'] : '';
        throw new webdriver.UnhandledAlertError(ex.message,
            new webdriver.Alert(self, text));
      }
      throw ex;
    }
    // Unwrap wire-protocol values (e.g. {ELEMENT: id} -> WebElement).
    return webdriver.WebDriver.fromWireValue_(self, response['value']);
  });

  // Function declaration (hoisted), so it may be referenced above.
  function checkHasNotQuit() {
    if (!self.session_) {
      throw new Error('This driver instance does not have a valid session ID ' +
                      '(did you call WebDriver.quit()?) and may no longer be ' +
                      'used.');
    }
  }
};
// ----------------------------------------------------------------------------
// Client command functions:
// ----------------------------------------------------------------------------
/**
 * Returns a promise for this client's session descriptor.
 * @return {!webdriver.promise.Promise} A promise for this client's session.
 */
webdriver.WebDriver.prototype.getSession = function() {
  var session = this.session_;
  return webdriver.promise.when(session);
};
/**
 * Retrieves the capabilities negotiated for this instance's session.
 * @return {!webdriver.promise.Promise} A promise that will resolve with the
 *     this instance's capabilities.
 */
webdriver.WebDriver.prototype.getCapabilities = function() {
  var extractCapabilities = function(session) {
    return session.getCapabilities();
  };
  return webdriver.promise.when(this.session_, extractCapabilities);
};
/**
 * Schedules a command to quit the current session. After calling quit, this
 * instance will be invalidated and may no longer be used to issue commands
 * against the browser.
 * @return {!webdriver.promise.Promise} A promise that will be resolved when
 *     the command has completed.
 */
webdriver.WebDriver.prototype.quit = function() {
  var command = new webdriver.Command(webdriver.CommandName.QUIT);
  var result = this.schedule(command, 'WebDriver.quit()');
  var self = this;
  // Delete our session ID when the quit command finishes; this will allow us
  // to throw an error when attempting to use a driver post-quit.
  return result.thenFinally(function() {
    delete self.session_;
  });
};
/**
 * Creates a new action sequence using this driver. The sequence will not be
 * scheduled for execution until {@link webdriver.ActionSequence#perform} is
 * called. Example:
 * <pre><code>
 *   driver.actions().
 *       mouseDown(element1).
 *       mouseMove(element2).
 *       mouseUp().
 *       perform();
 * </code></pre>
 * @return {!webdriver.ActionSequence} A new action sequence for this instance.
 */
webdriver.WebDriver.prototype.actions = function() {
  var sequence = new webdriver.ActionSequence(this);
  return sequence;
};
/**
* Schedules a command to execute JavaScript in the context of the currently
* selected frame or window. The script fragment will be executed as the body
* of an anonymous function. If the script is provided as a function object,
* that function will be converted to a string for injection into the target
* window.
*
* Any arguments provided in addition to the script will be included as script
* arguments and may be referenced using the {@code arguments} object.
* Arguments may be a boolean, number, string, or {@code webdriver.WebElement}.
* Arrays and objects may also be used as script arguments as long as each item
* adheres to the types previously mentioned.
*
* The script may refer to any variables accessible from the current window.
* Furthermore, the script will execute in the window's context, thus
* {@code document} may be used to refer to the current document. Any local
* variables will not be available once the script has finished executing,
* though global variables will persist.
*
* If the script has a return value (i.e. if the script contains a return
* statement), then the following steps will be taken for resolving this
* functions return value:
* <ul>
* <li>For a HTML element, the value will resolve to a
* {@code webdriver.WebElement}</li>
* <li>Null and undefined return values will resolve to null</li>
* <li>Booleans, numbers, and strings will resolve as is</li>
* <li>Functions will resolve to their string representation</li>
* <li>For arrays and objects, each member item will be converted according to
* the rules above</li>
* </ul>
*
* @param {!(string|Function)} script The script to execute.
* @param {...*} var_args The arguments to pass to the script.
* @return {!webdriver.promise.Promise} A promise that will resolve to the
* scripts return value.
*/
webdriver.WebDriver.prototype.executeScript = function(script, var_args) {
  // Function objects are stringified and wrapped so the remote end can
  // invoke them with the supplied arguments.
  var source = script;
  if (goog.isFunction(source)) {
    source = 'return (' + source + ').apply(null, arguments);';
  }
  var scriptArgs = goog.array.slice(arguments, 1);
  var command = new webdriver.Command(webdriver.CommandName.EXECUTE_SCRIPT).
      setParameter('script', source).
      setParameter('args', scriptArgs);
  return this.schedule(command, 'WebDriver.executeScript()');
};
/**
* Schedules a command to execute asynchronous JavaScript in the context of the
* currently selected frame or window. The script fragment will be executed as
* the body of an anonymous function. If the script is provided as a function
* object, that function will be converted to a string for injection into the
* target window.
*
* Any arguments provided in addition to the script will be included as script
* arguments and may be referenced using the {@code arguments} object.
* Arguments may be a boolean, number, string, or {@code webdriver.WebElement}.
* Arrays and objects may also be used as script arguments as long as each item
* adheres to the types previously mentioned.
*
* Unlike executing synchronous JavaScript with
* {@code webdriver.WebDriver.prototype.executeScript}, scripts executed with
* this function must explicitly signal they are finished by invoking the
* provided callback. This callback will always be injected into the
* executed function as the last argument, and thus may be referenced with
* {@code arguments[arguments.length - 1]}. The following steps will be taken
* for resolving this functions return value against the first argument to the
* script's callback function:
* <ul>
* <li>For a HTML element, the value will resolve to a
* {@code webdriver.WebElement}</li>
* <li>Null and undefined return values will resolve to null</li>
* <li>Booleans, numbers, and strings will resolve as is</li>
* <li>Functions will resolve to their string representation</li>
* <li>For arrays and objects, each member item will be converted according to
* the rules above</li>
* </ul>
*
* Example #1: Performing a sleep that is synchronized with the currently
* selected window:
* <code><pre>
* var start = new Date().getTime();
* driver.executeAsyncScript(
* 'window.setTimeout(arguments[arguments.length - 1], 500);').
* then(function() {
* console.log('Elapsed time: ' + (new Date().getTime() - start) + ' ms');
* });
* </pre></code>
*
* Example #2: Synchronizing a test with an AJAX application:
* <code><pre>
* var button = driver.findElement(By.id('compose-button'));
* button.click();
* driver.executeAsyncScript(
* 'var callback = arguments[arguments.length - 1];' +
* 'mailClient.getComposeWindowWidget().onload(callback);');
* driver.switchTo().frame('composeWidget');
 * driver.findElement(By.id('to')).sendKeys('[email protected]');
* </pre></code>
*
* Example #3: Injecting a XMLHttpRequest and waiting for the result. In this
* example, the inject script is specified with a function literal. When using
* this format, the function is converted to a string for injection, so it
* should not reference any symbols not defined in the scope of the page under
* test.
* <code><pre>
* driver.executeAsyncScript(function() {
* var callback = arguments[arguments.length - 1];
* var xhr = new XMLHttpRequest();
* xhr.open("GET", "/resource/data.json", true);
* xhr.onreadystatechange = function() {
* if (xhr.readyState == 4) {
 *         callback(xhr.responseText);
* }
* }
* xhr.send('');
* }).then(function(str) {
* console.log(JSON.parse(str)['food']);
* });
* </pre></code>
*
* @param {!(string|Function)} script The script to execute.
* @param {...*} var_args The arguments to pass to the script.
* @return {!webdriver.promise.Promise} A promise that will resolve to the
* scripts return value.
*/
webdriver.WebDriver.prototype.executeAsyncScript = function(script, var_args) {
  // Function objects are stringified and wrapped so the remote end can
  // invoke them with the supplied arguments (plus the injected callback).
  if (goog.isFunction(script)) {
    script = 'return (' + script + ').apply(null, arguments);';
  }
  return this.schedule(
      new webdriver.Command(webdriver.CommandName.EXECUTE_ASYNC_SCRIPT).
          setParameter('script', script).
          setParameter('args', goog.array.slice(arguments, 1)),
      // Fixed: the debug description previously read 'WebDriver.executeScript()',
      // copied from the synchronous variant, which made control-flow traces for
      // async scripts misleading.
      'WebDriver.executeAsyncScript()');
};
/**
 * Schedules a command to execute a custom function.
 * @param {!Function} fn The function to execute.
 * @param {Object=} opt_scope The object in whose scope to execute the function.
 * @param {...*} var_args Any arguments to pass to the function.
 * @return {!webdriver.promise.Promise} A promise that will be resolved with the
 *     function's result.
 */
webdriver.WebDriver.prototype.call = function(fn, opt_scope, var_args) {
  var fnArgs = goog.array.slice(arguments, 2);
  var description = 'WebDriver.call(' + (fn.name || 'function') + ')';
  var flow = this.flow_;
  return flow.execute(function() {
    // Resolve any promised arguments before invoking the user function.
    return webdriver.promise.fullyResolved(fnArgs).then(function(resolved) {
      return fn.apply(opt_scope, resolved);
    });
  }, description);
};
/**
 * Schedules a command to wait for a condition to hold, as defined by some
 * user supplied function. If any errors occur while evaluating the wait, they
 * will be allowed to propagate.
 * @param {function():boolean} fn The function to evaluate as a wait condition.
 * @param {number} timeout How long to wait for the condition to be true.
 * @param {string=} opt_message An optional message to use if the wait times
 *     out.
 * @return {!webdriver.promise.Promise} A promise that will be resolved when the
 *     wait condition has been satisfied.
 */
webdriver.WebDriver.prototype.wait = function(fn, timeout, opt_message) {
  // Delegates directly to the control flow's polling wait.
  var flow = this.flow_;
  return flow.wait(fn, timeout, opt_message);
};
/**
 * Schedules a command to make the driver sleep for the given amount of time.
 * @param {number} ms The amount of time, in milliseconds, to sleep.
 * @return {!webdriver.promise.Promise} A promise that will be resolved when the
 *     sleep has finished.
 */
webdriver.WebDriver.prototype.sleep = function(ms) {
  var description = 'WebDriver.sleep(' + ms + ')';
  return this.flow_.timeout(ms, description);
};
/**
 * Schedules a command to retrieve the current window handle.
 * @return {!webdriver.promise.Promise} A promise that will be resolved with the
 *     current window handle.
 */
webdriver.WebDriver.prototype.getWindowHandle = function() {
  var command = new webdriver.Command(
      webdriver.CommandName.GET_CURRENT_WINDOW_HANDLE);
  return this.schedule(command, 'WebDriver.getWindowHandle()');
};
/**
 * Schedules a command to retrieve the current list of available window handles.
 * @return {!webdriver.promise.Promise} A promise that will be resolved with an
 *     array of window handles.
 */
webdriver.WebDriver.prototype.getAllWindowHandles = function() {
  var command = new webdriver.Command(webdriver.CommandName.GET_WINDOW_HANDLES);
  return this.schedule(command, 'WebDriver.getAllWindowHandles()');
};
/**
 * Schedules a command to retrieve the current page's source. The page source
 * returned is a representation of the underlying DOM: do not expect it to be
 * formatted or escaped in the same way as the response sent from the web
 * server.
 * @return {!webdriver.promise.Promise} A promise that will be resolved with the
 *     current page source.
 */
webdriver.WebDriver.prototype.getPageSource = function() {
  var command = new webdriver.Command(webdriver.CommandName.GET_PAGE_SOURCE);
  return this.schedule(command, 'WebDriver.getPageSource()');
};
/**
 * Schedules a command to close the current window.
 * @return {!webdriver.promise.Promise} A promise that will be resolved when
 *     this command has completed.
 */
webdriver.WebDriver.prototype.close = function() {
  var command = new webdriver.Command(webdriver.CommandName.CLOSE);
  return this.schedule(command, 'WebDriver.close()');
};
/**
 * Schedules a command to navigate to the given URL.
 * @param {string} url The fully qualified URL to open.
 * @return {!webdriver.promise.Promise} A promise that will be resolved when the
 *     document has finished loading.
 */
webdriver.WebDriver.prototype.get = function(url) {
  // Convenience shorthand for navigate().to().
  var navigation = this.navigate();
  return navigation.to(url);
};
/**
 * Schedules a command to retrieve the URL of the current page.
 * @return {!webdriver.promise.Promise} A promise that will be resolved with the
 *     current URL.
 */
webdriver.WebDriver.prototype.getCurrentUrl = function() {
  var command = new webdriver.Command(webdriver.CommandName.GET_CURRENT_URL);
  return this.schedule(command, 'WebDriver.getCurrentUrl()');
};
/**
 * Schedules a command to retrieve the current page's title.
 * @return {!webdriver.promise.Promise} A promise that will be resolved with the
 *     current page's title.
 */
webdriver.WebDriver.prototype.getTitle = function() {
  var command = new webdriver.Command(webdriver.CommandName.GET_TITLE);
  return this.schedule(command, 'WebDriver.getTitle()');
};
/**
* Schedule a command to find an element on the page. If the element cannot be
* found, a {@link bot.ErrorCode.NO_SUCH_ELEMENT} result will be returned
* by the driver. Unlike other commands, this error cannot be suppressed. In
* other words, scheduling a command to find an element doubles as an assert
* that the element is present on the page. To test whether an element is
* present on the page, use {@link #isElementPresent} instead.
*
* <p>The search criteria for an element may be defined using one of the
* factories in the {@link webdriver.By} namespace, or as a short-hand
* {@link webdriver.By.Hash} object. For example, the following two statements
* are equivalent:
* <code><pre>
* var e1 = driver.findElement(By.id('foo'));
* var e2 = driver.findElement({id:'foo'});
* </pre></code>
*
* <p>You may also provide a custom locator function, which takes as input
* this WebDriver instance and returns a {@link webdriver.WebElement}, or a
* promise that will resolve to a WebElement. For example, to find the first
* visible link on a page, you could write:
* <code><pre>
* var link = driver.findElement(firstVisibleLink);
*
* function firstVisibleLink(driver) {
* var links = driver.findElements(By.tagName('a'));
* return webdriver.promise.filter(links, function(link) {
 *     return link.isDisplayed();
* }).then(function(visibleLinks) {
* return visibleLinks[0];
* });
* }
* </pre></code>
*
* <p>When running in the browser, a WebDriver cannot manipulate DOM elements
* directly; it may do so only through a {@link webdriver.WebElement} reference.
* This function may be used to generate a WebElement from a DOM element. A
* reference to the DOM element will be stored in a known location and this
* driver will attempt to retrieve it through {@link #executeScript}. If the
* element cannot be found (eg, it belongs to a different document than the
* one this instance is currently focused on), a
* {@link bot.ErrorCode.NO_SUCH_ELEMENT} error will be returned.
*
* @param {!(webdriver.Locator|webdriver.By.Hash|Element|Function)} locator The
* locator to use.
* @return {!webdriver.WebElement} A WebElement that can be used to issue
* commands against the located element. If the element is not found, the
* element will be invalidated and all scheduled commands aborted.
*/
webdriver.WebDriver.prototype.findElement = function(locator) {
  var elementId;
  // A raw DOM element is located via a reference stored on its ownerDocument;
  // everything else goes through the normal locator machinery.
  var isDomElement = 'nodeType' in locator && 'ownerDocument' in locator;
  if (isDomElement) {
    var domElement = /** @type {!Element} */ (locator);
    elementId = this.findDomElement_(domElement).then(function(elements) {
      if (!elements.length) {
        throw new bot.Error(bot.ErrorCode.NO_SUCH_ELEMENT,
            'Unable to locate element. Is WebDriver focused on its ' +
            'ownerDocument\'s frame?');
      }
      return elements[0];
    });
  } else {
    locator = webdriver.Locator.checkLocator(locator);
    if (goog.isFunction(locator)) {
      // Custom locator function: evaluate it against this driver.
      elementId = this.findElementInternal_(locator, this);
    } else {
      var command = new webdriver.Command(webdriver.CommandName.FIND_ELEMENT).
          setParameter('using', locator.using).
          setParameter('value', locator.value);
      elementId = this.schedule(
          command, 'WebDriver.findElement(' + locator + ')');
    }
  }
  return new webdriver.WebElement(this, elementId);
};
/**
 * Evaluates a custom locator function and normalizes its result to a single
 * WebElement.
 * @param {!Function} locatorFn The locator function to use.
 * @param {!(webdriver.WebDriver|webdriver.WebElement)} context The search
 *     context.
 * @return {!webdriver.promise.Promise.<!webdriver.WebElement>} A
 *     promise that will resolve to a list of WebElements.
 * @private
 */
webdriver.WebDriver.prototype.findElementInternal_ = function(
    locatorFn, context) {
  var locate = goog.partial(locatorFn, context);
  return this.call(locate).then(function(result) {
    // A locator may return an array; only the first entry is used.
    var element = goog.isArray(result) ? result[0] : result;
    if (!(element instanceof webdriver.WebElement)) {
      throw new TypeError('Custom locator did not return a WebElement');
    }
    return element;
  });
};
/**
 * Locates a DOM element so that commands may be issued against it using the
 * {@link webdriver.WebElement} class. This is accomplished by storing a
 * reference to the element in an object on the element's ownerDocument.
 * {@link #executeScript} will then be used to create a WebElement from this
 * reference. This requires this driver to currently be focused on the
 * ownerDocument's window+frame.
 * @param {!Element} element The element to locate.
 * @return {!webdriver.promise.Promise} A promise that will be resolved
 *     with the located WebElement.
 * @private
 */
webdriver.WebDriver.prototype.findDomElement_ = function(element) {
  var doc = element.ownerDocument;
  // Lazily create a registry object on the document shared by all lookups.
  var store = doc['$webdriver$'] = doc['$webdriver$'] || {};
  var id = Math.floor(Math.random() * goog.now()).toString(36);
  store[id] = element;
  // Tag the element itself so lookupElement can verify the registry entry
  // still refers to the same node.
  element[id] = id;
  function cleanUp() {
    delete store[id];
  }
  // NOTE: this function is serialized to a string by executeScript and runs
  // inside the browser page, so it must only reference page globals
  // (e.g. |document|) — not anything from this file's scope.
  function lookupElement(id) {
    var store = document['$webdriver$'];
    if (!store) {
      return null;
    }
    var element = store[id];
    if (!element || element[id] !== id) {
      return [];
    }
    return [element];
  }
  // Always clean up the registry entry, whether the lookup succeeds or fails.
  return this.executeScript(lookupElement, id).
      then(function(value) {
        cleanUp();
        if (value.length && !(value[0] instanceof webdriver.WebElement)) {
          throw new Error('JS locator script result was not a WebElement');
        }
        return value;
      }, cleanUp);
};
/**
 * Schedules a command to test if an element is present on the page.
 *
 * <p>If given a DOM element, this function will check if it belongs to the
 * document the driver is currently focused on. Otherwise, the function will
 * test if at least one element can be found with the given search criteria.
 *
 * @param {!(webdriver.Locator|webdriver.By.Hash|Element|
 *           Function)} locatorOrElement The locator to use, or the actual
 *     DOM element to be located by the server.
 * @return {!webdriver.promise.Promise.<boolean>} A promise that will resolve
 *     with whether the element is present on the page.
 */
webdriver.WebDriver.prototype.isElementPresent = function(locatorOrElement) {
  var isDomElement =
      'nodeType' in locatorOrElement && 'ownerDocument' in locatorOrElement;
  var search = isDomElement ?
      this.findDomElement_(/** @type {!Element} */ (locatorOrElement)) :
      this.findElements.apply(this, arguments);
  return search.then(function(found) {
    return found.length > 0;
  });
};
/**
 * Schedule a command to search for multiple elements on the page.
 *
 * @param {!(webdriver.Locator|webdriver.By.Hash|Function)} locator The locator
 *     strategy to use when searching for the element.
 * @return {!webdriver.promise.Promise.<!Array.<!webdriver.WebElement>>} A
 *     promise that will resolve to an array of WebElements.
 */
webdriver.WebDriver.prototype.findElements = function(locator) {
  locator = webdriver.Locator.checkLocator(locator);
  if (goog.isFunction(locator)) {
    // Custom locator function: evaluate it against this driver.
    return this.findElementsInternal_(locator, this);
  }
  var command = new webdriver.Command(webdriver.CommandName.FIND_ELEMENTS).
      setParameter('using', locator.using).
      setParameter('value', locator.value);
  return this.schedule(command, 'WebDriver.findElements(' + locator + ')');
};
/**
 * Evaluates a custom locator function and normalizes its result to an array
 * of WebElements (non-element entries are dropped).
 * @param {!Function} locatorFn The locator function to use.
 * @param {!(webdriver.WebDriver|webdriver.WebElement)} context The search
 *     context.
 * @return {!webdriver.promise.Promise.<!Array.<!webdriver.WebElement>>} A
 *     promise that will resolve to an array of WebElements.
 * @private
 */
webdriver.WebDriver.prototype.findElementsInternal_ = function(
    locatorFn, context) {
  var locate = goog.partial(locatorFn, context);
  return this.call(locate).then(function(result) {
    if (result instanceof webdriver.WebElement) {
      return [result];
    }
    var isElement = function(item) {
      return item instanceof webdriver.WebElement;
    };
    return goog.isArray(result) ? goog.array.filter(result, isElement) : [];
  });
};
/**
 * Schedule a command to take a screenshot. The driver makes a best effort to
 * return a screenshot of the following, in order of preference:
 * <ol>
 *   <li>Entire page
 *   <li>Current window
 *   <li>Visible portion of the current frame
 *   <li>The screenshot of the entire display containing the browser
 * </ol>
 *
 * @return {!webdriver.promise.Promise} A promise that will be resolved to the
 *     screenshot as a base-64 encoded PNG.
 */
webdriver.WebDriver.prototype.takeScreenshot = function() {
  var command = new webdriver.Command(webdriver.CommandName.SCREENSHOT);
  return this.schedule(command, 'WebDriver.takeScreenshot()');
};
/**
 * @return {!webdriver.WebDriver.Options} The options interface for this
 *     instance.
 */
webdriver.WebDriver.prototype.manage = function() {
  var options = new webdriver.WebDriver.Options(this);
  return options;
};
/**
 * @return {!webdriver.WebDriver.Navigation} The navigation interface for this
 *     instance.
 */
webdriver.WebDriver.prototype.navigate = function() {
  var navigation = new webdriver.WebDriver.Navigation(this);
  return navigation;
};
/**
 * @return {!webdriver.WebDriver.TargetLocator} The target locator interface for
 *     this instance.
 */
webdriver.WebDriver.prototype.switchTo = function() {
  var targetLocator = new webdriver.WebDriver.TargetLocator(this);
  return targetLocator;
};
/**
 * Interface for navigating back and forth in the browser history. Instances
 * delegate all commands to the parent driver's schedule().
 * @param {!webdriver.WebDriver} driver The parent driver.
 * @constructor
 */
webdriver.WebDriver.Navigation = function(driver) {
  /** @private {!webdriver.WebDriver} */
  this.driver_ = driver;
};
/**
 * Schedules a command to navigate to a new URL.
 * @param {string} url The URL to navigate to.
 * @return {!webdriver.promise.Promise} A promise that will be resolved when the
 *     URL has been loaded.
 */
webdriver.WebDriver.Navigation.prototype.to = function(url) {
  var command = new webdriver.Command(webdriver.CommandName.GET).
      setParameter('url', url);
  return this.driver_.schedule(
      command, 'WebDriver.navigate().to(' + url + ')');
};
/**
 * Schedules a command to move backwards in the browser history.
 * @return {!webdriver.promise.Promise} A promise that will be resolved when the
 *     navigation event has completed.
 */
webdriver.WebDriver.Navigation.prototype.back = function() {
  var command = new webdriver.Command(webdriver.CommandName.GO_BACK);
  return this.driver_.schedule(command, 'WebDriver.navigate().back()');
};
/**
 * Schedules a command to move forwards in the browser history.
 * @return {!webdriver.promise.Promise} A promise that will be resolved when the
 *     navigation event has completed.
 */
webdriver.WebDriver.Navigation.prototype.forward = function() {
  var command = new webdriver.Command(webdriver.CommandName.GO_FORWARD);
  return this.driver_.schedule(command, 'WebDriver.navigate().forward()');
};
/**
 * Schedules a command to refresh the current page.
 * @return {!webdriver.promise.Promise} A promise that will be resolved when the
 *     navigation event has completed.
 */
webdriver.WebDriver.Navigation.prototype.refresh = function() {
  var command = new webdriver.Command(webdriver.CommandName.REFRESH);
  return this.driver_.schedule(command, 'WebDriver.navigate().refresh()');
};
/**
 * Provides methods for managing browser and driver state (cookies, timeouts,
 * window geometry, logs). Instances delegate all commands to the parent
 * driver's schedule().
 * @param {!webdriver.WebDriver} driver The parent driver.
 * @constructor
 */
webdriver.WebDriver.Options = function(driver) {
  /** @private {!webdriver.WebDriver} */
  this.driver_ = driver;
};
/**
 * Schedules a command to add a cookie.
 * @param {string} name The cookie name.
 * @param {string} value The cookie value.
 * @param {string=} opt_path The cookie path.
 * @param {string=} opt_domain The cookie domain.
 * @param {boolean=} opt_isSecure Whether the cookie is secure.
 * @param {(number|!Date)=} opt_expiry When the cookie expires. If specified as
 *     a number, should be in milliseconds since midnight, January 1, 1970 UTC.
 * @return {!webdriver.promise.Promise} A promise that will be resolved when the
 *     cookie has been added to the page.
 */
webdriver.WebDriver.Options.prototype.addCookie = function(
    name, value, opt_path, opt_domain, opt_isSecure, opt_expiry) {
  // We do not allow '=' or ';' in the name.
  if (/[;=]/.test(name)) {
    throw Error('Invalid cookie name "' + name + '"');
  }
  // We do not allow ';' in value.
  if (/;/.test(value)) {
    throw Error('Invalid cookie value "' + value + '"');
  }
  // The cookie string is only used for the debug description below.
  var cookieString = name + '=' + value;
  if (opt_domain) {
    cookieString += ';domain=' + opt_domain;
  }
  if (opt_path) {
    cookieString += ';path=' + opt_path;
  }
  if (opt_isSecure) {
    cookieString += ';secure';
  }
  var expiry;
  if (goog.isDef(opt_expiry)) {
    var expiryDate = goog.isNumber(opt_expiry) ?
        new Date(opt_expiry) :
        /** @type {!Date} */ (opt_expiry);
    cookieString += ';expires=' + expiryDate.toUTCString();
    // The wire protocol expects seconds, not milliseconds.
    expiry = Math.floor(expiryDate.getTime() / 1000);
  }
  var command = new webdriver.Command(webdriver.CommandName.ADD_COOKIE).
      setParameter('cookie', {
        'name': name,
        'value': value,
        'path': opt_path,
        'domain': opt_domain,
        'secure': !!opt_isSecure,
        'expiry': expiry
      });
  return this.driver_.schedule(
      command, 'WebDriver.manage().addCookie(' + cookieString + ')');
};
/**
 * Schedules a command to delete all cookies visible to the current page.
 * @return {!webdriver.promise.Promise} A promise that will be resolved when all
 *     cookies have been deleted.
 */
webdriver.WebDriver.Options.prototype.deleteAllCookies = function() {
  var command = new webdriver.Command(webdriver.CommandName.DELETE_ALL_COOKIES);
  return this.driver_.schedule(
      command, 'WebDriver.manage().deleteAllCookies()');
};
/**
 * Schedules a command to delete the cookie with the given name. This command is
 * a no-op if there is no cookie with the given name visible to the current
 * page.
 * @param {string} name The name of the cookie to delete.
 * @return {!webdriver.promise.Promise} A promise that will be resolved when the
 *     cookie has been deleted.
 */
webdriver.WebDriver.Options.prototype.deleteCookie = function(name) {
  var command = new webdriver.Command(webdriver.CommandName.DELETE_COOKIE).
      setParameter('name', name);
  return this.driver_.schedule(
      command, 'WebDriver.manage().deleteCookie(' + name + ')');
};
/**
 * Schedules a command to retrieve all cookies visible to the current page.
 * Each cookie will be returned as a JSON object as described by the WebDriver
 * wire protocol.
 * @return {!webdriver.promise.Promise} A promise that will be resolved with the
 *     cookies visible to the current page.
 * @see http://code.google.com/p/selenium/wiki/JsonWireProtocol#Cookie_JSON_Object
 */
webdriver.WebDriver.Options.prototype.getCookies = function() {
  var command = new webdriver.Command(webdriver.CommandName.GET_ALL_COOKIES);
  return this.driver_.schedule(command, 'WebDriver.manage().getCookies()');
};
/**
 * Schedules a command to retrieve the cookie with the given name. Returns null
 * if there is no such cookie. The cookie will be returned as a JSON object as
 * described by the WebDriver wire protocol.
 * @param {string} name The name of the cookie to retrieve.
 * @return {!webdriver.promise.Promise} A promise that will be resolved with the
 *     named cookie, or {@code null} if there is no such cookie.
 * @see http://code.google.com/p/selenium/wiki/JsonWireProtocol#Cookie_JSON_Object
 */
webdriver.WebDriver.Options.prototype.getCookie = function(name) {
  var matchesName = function(cookie) {
    return cookie && cookie['name'] == name;
  };
  return this.getCookies().then(function(cookies) {
    return goog.array.find(cookies, matchesName);
  });
};
/**
 * @return {!webdriver.WebDriver.Logs} The interface for managing driver
 *     logs.
 */
webdriver.WebDriver.Options.prototype.logs = function() {
  var logs = new webdriver.WebDriver.Logs(this.driver_);
  return logs;
};
/**
 * @return {!webdriver.WebDriver.Timeouts} The interface for managing driver
 *     timeouts.
 */
webdriver.WebDriver.Options.prototype.timeouts = function() {
  var timeouts = new webdriver.WebDriver.Timeouts(this.driver_);
  return timeouts;
};
/**
 * @return {!webdriver.WebDriver.Window} The interface for managing the
 *     current window.
 */
webdriver.WebDriver.Options.prototype.window = function() {
  var win = new webdriver.WebDriver.Window(this.driver_);
  return win;
};
/**
 * An interface for managing timeout behavior for WebDriver instances.
 * Instances delegate all commands to the parent driver's schedule().
 * @param {!webdriver.WebDriver} driver The parent driver.
 * @constructor
 */
webdriver.WebDriver.Timeouts = function(driver) {
  /** @private {!webdriver.WebDriver} */
  this.driver_ = driver;
};
/**
* Specifies the amount of time the driver should wait when searching for an
* element if it is not immediately present.
* <p/>
* When searching for a single element, the driver should poll the page
* until the element has been found, or this timeout expires before failing
* with a {@code bot.ErrorCode.NO_SUCH_ELEMENT} error. When searching
* for multiple elements, the driver should poll the page until at least one
* element has been found or this timeout has expired.
* <p/>
* Setting the wait timeout to 0 (its default value), disables implicit
* waiting.
* <p/>
* Increasing the implicit wait timeout should be used judiciously as it
* will have an adverse effect on test run time, especially when used with
* slower location strategies like XPath.
*
* @param {number} ms The amount of time to wait, in milliseconds.
* @return {!webdriver.promise.Promise} A promise that will be resolved when the
* implicit wait timeout has been set.
*/
webdriver.WebDriver.Timeouts.prototype.implicitlyWait = function(ms) {
  // Negative values are clamped to 0 (implicit waiting disabled).
  var command = new webdriver.Command(webdriver.CommandName.IMPLICITLY_WAIT).
      setParameter('ms', ms < 0 ? 0 : ms);
  return this.driver_.schedule(
      command, 'WebDriver.manage().timeouts().implicitlyWait(' + ms + ')');
};
/**
* Sets the amount of time to wait, in milliseconds, for an asynchronous script
* to finish execution before returning an error. If the timeout is less than or
* equal to 0, the script will be allowed to run indefinitely.
*
* @param {number} ms The amount of time to wait, in milliseconds.
* @return {!webdriver.promise.Promise} A promise that will be resolved when the
* script timeout has been set.
*/
webdriver.WebDriver.Timeouts.prototype.setScriptTimeout = function(ms) {
  // Negative values are clamped to 0 (scripts may run indefinitely).
  var command = new webdriver.Command(webdriver.CommandName.SET_SCRIPT_TIMEOUT).
      setParameter('ms', ms < 0 ? 0 : ms);
  return this.driver_.schedule(
      command, 'WebDriver.manage().timeouts().setScriptTimeout(' + ms + ')');
};
/**
 * Sets the amount of time to wait for a page load to complete before returning
 * an error. If the timeout is negative, page loads may be indefinite.
 * @param {number} ms The amount of time to wait, in milliseconds.
 * @return {!webdriver.promise.Promise} A promise that will be resolved when
 *     the timeout has been set.
 */
webdriver.WebDriver.Timeouts.prototype.pageLoadTimeout = function(ms) {
  var command = new webdriver.Command(webdriver.CommandName.SET_TIMEOUT).
      setParameter('type', 'page load').
      setParameter('ms', ms);
  return this.driver_.schedule(
      command, 'WebDriver.manage().timeouts().pageLoadTimeout(' + ms + ')');
};
/**
 * An interface for managing the current window (position, size, maximize).
 * Instances delegate all commands to the parent driver's schedule().
 * @param {!webdriver.WebDriver} driver The parent driver.
 * @constructor
 */
webdriver.WebDriver.Window = function(driver) {
  /** @private {!webdriver.WebDriver} */
  this.driver_ = driver;
};
/**
 * Retrieves the window's current position, relative to the top left corner of
 * the screen.
 * @return {!webdriver.promise.Promise} A promise that will be resolved with the
 *     window's position in the form of a {x:number, y:number} object literal.
 */
webdriver.WebDriver.Window.prototype.getPosition = function() {
  var command = new webdriver.Command(
      webdriver.CommandName.GET_WINDOW_POSITION).
      setParameter('windowHandle', 'current');
  return this.driver_.schedule(
      command, 'WebDriver.manage().window().getPosition()');
};
/**
 * Repositions the current window.
 * @param {number} x The desired horizontal position, relative to the left side
 *     of the screen.
 * @param {number} y The desired vertical position, relative to the top of the
 *     of the screen.
 * @return {!webdriver.promise.Promise} A promise that will be resolved when the
 *     command has completed.
 */
webdriver.WebDriver.Window.prototype.setPosition = function(x, y) {
  var command = new webdriver.Command(
      webdriver.CommandName.SET_WINDOW_POSITION).
      setParameter('windowHandle', 'current').
      setParameter('x', x).
      setParameter('y', y);
  var description =
      'WebDriver.manage().window().setPosition(' + x + ', ' + y + ')';
  return this.driver_.schedule(command, description);
};
/**
 * Retrieves the window's current size.
 * @return {!webdriver.promise.Promise} A promise that will be resolved with the
 *     window's size in the form of a {width:number, height:number} object
 *     literal.
 */
webdriver.WebDriver.Window.prototype.getSize = function() {
  var command = new webdriver.Command(webdriver.CommandName.GET_WINDOW_SIZE).
      setParameter('windowHandle', 'current');
  return this.driver_.schedule(
      command, 'WebDriver.manage().window().getSize()');
};
/**
 * Resizes the current window.
 * @param {number} width The desired window width.
 * @param {number} height The desired window height.
 * @return {!webdriver.promise.Promise} A promise that will be resolved when the
 *     command has completed.
 */
webdriver.WebDriver.Window.prototype.setSize = function(width, height) {
  var command = new webdriver.Command(webdriver.CommandName.SET_WINDOW_SIZE).
      setParameter('windowHandle', 'current').
      setParameter('width', width).
      setParameter('height', height);
  var description =
      'WebDriver.manage().window().setSize(' + width + ', ' + height + ')';
  return this.driver_.schedule(command, description);
};
/**
 * Maximizes the current window.
 * @return {!webdriver.promise.Promise} A promise that will be resolved when the
 *     command has completed.
 */
webdriver.WebDriver.Window.prototype.maximize = function() {
  var command = new webdriver.Command(webdriver.CommandName.MAXIMIZE_WINDOW).
      setParameter('windowHandle', 'current');
  return this.driver_.schedule(
      command, 'WebDriver.manage().window().maximize()');
};
/**
 * Interface for managing WebDriver log records. Instances delegate all
 * commands to the parent driver's schedule().
 * @param {!webdriver.WebDriver} driver The parent driver.
 * @constructor
 */
webdriver.WebDriver.Logs = function(driver) {
  /** @private {!webdriver.WebDriver} */
  this.driver_ = driver;
};
/**
* Fetches available log entries for the given type.
*
* <p/>Note that log buffers are reset after each call, meaning that
* available log entries correspond to those entries not yet returned for a
* given log type. In practice, this means that this call will return the
* available log entries since the last call, or from the start of the
* session.
*
* @param {!webdriver.logging.Type} type The desired log type.
* @return {!webdriver.promise.Promise.<!Array.<!webdriver.logging.Entry>>} A
* promise that will resolve to a list of log entries for the specified
* type.
*/
webdriver.WebDriver.Logs.prototype.get = function(type) {
  var command = new webdriver.Command(webdriver.CommandName.GET_LOG).
      setParameter('type', type);
  return this.driver_.schedule(
      command, 'WebDriver.manage().logs().get(' + type + ')').
      then(function(entries) {
        // Normalize raw wire-protocol records into logging.Entry instances.
        return goog.array.map(entries, function(entry) {
          if (entry instanceof webdriver.logging.Entry) {
            return entry;
          }
          return new webdriver.logging.Entry(
              entry['level'], entry['message'], entry['timestamp']);
        });
      });
};
/**
 * Retrieves the log types available to this driver.
 * @return {!webdriver.promise.Promise.<!Array.<!webdriver.logging.Type>>} A
 *     promise that will resolve to a list of available log types.
 */
webdriver.WebDriver.Logs.prototype.getAvailableLogTypes = function() {
  var command = new webdriver.Command(
      webdriver.CommandName.GET_AVAILABLE_LOG_TYPES);
  return this.driver_.schedule(
      command, 'WebDriver.manage().logs().getAvailableLogTypes()');
};
/**
 * An interface for changing the focus of the driver to another frame or
 * window. Instances delegate all commands to the parent driver's schedule().
 * @param {!webdriver.WebDriver} driver The parent driver.
 * @constructor
 */
webdriver.WebDriver.TargetLocator = function(driver) {
  /** @private {!webdriver.WebDriver} */
  this.driver_ = driver;
};
/**
 * Schedules a command retrieve the {@code document.activeElement} element on
 * the current document, or {@code document.body} if activeElement is not
 * available.
 * @return {!webdriver.WebElement} The active element.
 */
webdriver.WebDriver.TargetLocator.prototype.activeElement = function() {
  var command = new webdriver.Command(
      webdriver.CommandName.GET_ACTIVE_ELEMENT);
  var elementId = this.driver_.schedule(
      command, 'WebDriver.switchTo().activeElement()');
  return new webdriver.WebElement(this.driver_, elementId);
};
/**
* Schedules a command to switch focus of all future commands to the first frame
* on the page.
* @return {!webdriver.promise.Promise} A promise that will be resolved when the
* driver has changed focus to the default content.
*/
webdriver.WebDriver.TargetLocator.prototype.defaultContent = function() {
return this.driver_.schedule(
new webdriver.Command(webdriver.CommandName.SWITCH_TO_FRAME).
setParameter('id', null),
'WebDriver.switchTo().defaultContent()');
};
/**
* Schedules a command to switch the focus of all future commands to another
* frame on the page.
* <p/>
* If the frame is specified by a number, the command will switch to the frame
* by its (zero-based) index into the {@code window.frames} collection.
* <p/>
* If the frame is specified by a string, the command will select the frame by
* its name or ID. To select sub-frames, simply separate the frame names/IDs by
* dots. As an example, "main.child" will select the frame with the name "main"
* and then its child "child".
* <p/>
* If the specified frame can not be found, the deferred result will errback
* with a {@code bot.ErrorCode.NO_SUCH_FRAME} error.
* @param {string|number} nameOrIndex The frame locator.
* @return {!webdriver.promise.Promise} A promise that will be resolved when the
* driver has changed focus to the specified frame.
*/
webdriver.WebDriver.TargetLocator.prototype.frame = function(nameOrIndex) {
return this.driver_.schedule(
new webdriver.Command(webdriver.CommandName.SWITCH_TO_FRAME).
setParameter('id', nameOrIndex),
'WebDriver.switchTo().frame(' + nameOrIndex + ')');
};
/**
* Schedules a command to switch the focus of all future commands to another
* window. Windows may be specified by their {@code window.name} attribute or
* by its handle (as returned by {@code webdriver.WebDriver#getWindowHandles}).
* <p/>
* If the specificed window can not be found, the deferred result will errback
* with a {@code bot.ErrorCode.NO_SUCH_WINDOW} error.
* @param {string} nameOrHandle The name or window handle of the window to
* switch focus to.
* @return {!webdriver.promise.Promise} A promise that will be resolved when the
* driver has changed focus to the specified window.
*/
webdriver.WebDriver.TargetLocator.prototype.window = function(nameOrHandle) {
return this.driver_.schedule(
new webdriver.Command(webdriver.CommandName.SWITCH_TO_WINDOW).
setParameter('name', nameOrHandle),
'WebDriver.switchTo().window(' + nameOrHandle + ')');
};
/**
* Schedules a command to change focus to the active alert dialog. This command
* will return a {@link bot.ErrorCode.NO_MODAL_DIALOG_OPEN} error if a modal
* dialog is not currently open.
* @return {!webdriver.Alert} The open alert.
*/
webdriver.WebDriver.TargetLocator.prototype.alert = function() {
var text = this.driver_.schedule(
new webdriver.Command(webdriver.CommandName.GET_ALERT_TEXT),
'WebDriver.switchTo().alert()');
return new webdriver.Alert(this.driver_, text);
};
/**
* Simulate pressing many keys at once in a "chord". Takes a sequence of
* {@link webdriver.Key}s or strings, appends each of the values to a string,
* and adds the chord termination key ({@link webdriver.Key.NULL}) and returns
* the resultant string.
*
* Note: when the low-level webdriver key handlers see Keys.NULL, active
* modifier keys (CTRL/ALT/SHIFT/etc) release via a keyup event.
*
* @param {...string} var_args The key sequence to concatenate.
* @return {string} The null-terminated key sequence.
* @see http://code.google.com/p/webdriver/issues/detail?id=79
*/
webdriver.Key.chord = function(var_args) {
var sequence = goog.array.reduce(
goog.array.slice(arguments, 0),
function(str, key) {
return str + key;
}, '');
sequence += webdriver.Key.NULL;
return sequence;
};
//////////////////////////////////////////////////////////////////////////////
//
// webdriver.WebElement
//
//////////////////////////////////////////////////////////////////////////////
/**
* Represents a DOM element. WebElements can be found by searching from the
* document root using a {@code webdriver.WebDriver} instance, or by searching
* under another {@code webdriver.WebElement}:
* <pre><code>
* driver.get('http://www.google.com');
* var searchForm = driver.findElement(By.tagName('form'));
* var searchBox = searchForm.findElement(By.name('q'));
* searchBox.sendKeys('webdriver');
* </code></pre>
*
* The WebElement is implemented as a promise for compatibility with the promise
* API. It will always resolve itself when its internal state has been fully
* resolved and commands may be issued against the element. This can be used to
* catch errors when an element cannot be located on the page:
* <pre><code>
* driver.findElement(By.id('not-there')).then(function(element) {
* alert('Found an element that was not expected to be there!');
* }, function(error) {
* alert('The element was not found, as expected');
* });
* </code></pre>
*
* @param {!webdriver.WebDriver} driver The parent WebDriver instance for this
* element.
* @param {!(string|webdriver.promise.Promise)} id Either the opaque ID for the
* underlying DOM element assigned by the server, or a promise that will
* resolve to that ID or another WebElement.
* @constructor
* @extends {webdriver.promise.Deferred}
*/
webdriver.WebElement = function(driver, id) {
webdriver.promise.Deferred.call(this, null, driver.controlFlow());
/**
* The parent WebDriver instance for this element.
* @private {!webdriver.WebDriver}
*/
this.driver_ = driver;
// This class is responsible for resolving itself; delete the resolve and
// reject methods so they may not be accessed by consumers of this class.
var fulfill = goog.partial(this.fulfill, this);
var reject = this.reject;
delete this.promise;
delete this.fulfill;
delete this.reject;
/**
* A promise that resolves to the JSON representation of this WebElement's
* ID, as defined by the WebDriver wire protocol.
* @private {!webdriver.promise.Promise}
* @see http://code.google.com/p/selenium/wiki/JsonWireProtocol
*/
this.id_ = webdriver.promise.when(id, function(id) {
if (id instanceof webdriver.WebElement) {
return id.id_;
} else if (goog.isDef(id[webdriver.WebElement.ELEMENT_KEY])) {
return id;
}
var json = {};
json[webdriver.WebElement.ELEMENT_KEY] = id;
return json;
});
// This WebElement should not be resolved until its ID has been
// fully resolved.
this.id_.then(fulfill, reject);
};
goog.inherits(webdriver.WebElement, webdriver.promise.Deferred);
/**
* The property key used in the wire protocol to indicate that a JSON object
* contains the ID of a WebElement.
* @type {string}
* @const
*/
webdriver.WebElement.ELEMENT_KEY = 'ELEMENT';
/**
* Compares to WebElements for equality.
* @param {!webdriver.WebElement} a A WebElement.
* @param {!webdriver.WebElement} b A WebElement.
* @return {!webdriver.promise.Promise} A promise that will be resolved to
* whether the two WebElements are equal.
*/
webdriver.WebElement.equals = function(a, b) {
if (a == b) {
return webdriver.promise.fulfilled(true);
}
return webdriver.promise.fullyResolved([a.id_, b.id_]).then(function(ids) {
// If the two element's have the same ID, they should be considered
// equal. Otherwise, they may still be equivalent, but we'll need to
// ask the server to check for us.
if (ids[0][webdriver.WebElement.ELEMENT_KEY] ==
ids[1][webdriver.WebElement.ELEMENT_KEY]) {
return true;
}
var command = new webdriver.Command(
webdriver.CommandName.ELEMENT_EQUALS);
command.setParameter('other', b);
return a.schedule_(command, 'webdriver.WebElement.equals()');
});
};
/**
* @return {!webdriver.WebDriver} The parent driver for this instance.
*/
webdriver.WebElement.prototype.getDriver = function() {
return this.driver_;
};
/**
* @return {!webdriver.promise.Promise} A promise that resolves to this
* element's JSON representation as defined by the WebDriver wire protocol.
* @see http://code.google.com/p/selenium/wiki/JsonWireProtocol
*/
webdriver.WebElement.prototype.toWireValue = function() {
return this.id_;
};
/**
* Schedules a command that targets this element with the parent WebDriver
* instance. Will ensure this element's ID is included in the command parameters
* under the "id" key.
* @param {!webdriver.Command} command The command to schedule.
* @param {string} description A description of the command for debugging.
* @return {!webdriver.promise.Promise} A promise that will be resolved with
* the command result.
* @see webdriver.WebDriver.prototype.schedule
* @private
*/
webdriver.WebElement.prototype.schedule_ = function(command, description) {
command.setParameter('id', this.id_);
return this.driver_.schedule(command, description);
};
/**
* Schedule a command to find a descendant of this element. If the element
* cannot be found, a {@code bot.ErrorCode.NO_SUCH_ELEMENT} result will
* be returned by the driver. Unlike other commands, this error cannot be
* suppressed. In other words, scheduling a command to find an element doubles
* as an assert that the element is present on the page. To test whether an
* element is present on the page, use {@code #isElementPresent} instead.
*
* <p>The search criteria for an element may be defined using one of the
* factories in the {@link webdriver.By} namespace, or as a short-hand
* {@link webdriver.By.Hash} object. For example, the following two statements
* are equivalent:
* <code><pre>
* var e1 = element.findElement(By.id('foo'));
* var e2 = element.findElement({id:'foo'});
* </pre></code>
*
* <p>You may also provide a custom locator function, which takes as input
* this WebDriver instance and returns a {@link webdriver.WebElement}, or a
* promise that will resolve to a WebElement. For example, to find the first
* visible link on a page, you could write:
* <code><pre>
* var link = element.findElement(firstVisibleLink);
*
* function firstVisibleLink(element) {
* var links = element.findElements(By.tagName('a'));
* return webdriver.promise.filter(links, function(link) {
* return links.isDisplayed();
* }).then(function(visibleLinks) {
* return visibleLinks[0];
* });
* }
* </pre></code>
*
* @param {!(webdriver.Locator|webdriver.By.Hash|Function)} locator The
* locator strategy to use when searching for the element.
* @return {!webdriver.WebElement} A WebElement that can be used to issue
* commands against the located element. If the element is not found, the
* element will be invalidated and all scheduled commands aborted.
*/
webdriver.WebElement.prototype.findElement = function(locator) {
locator = webdriver.Locator.checkLocator(locator);
var id;
if (goog.isFunction(locator)) {
id = this.driver_.findElementInternal_(locator, this);
} else {
var command = new webdriver.Command(
webdriver.CommandName.FIND_CHILD_ELEMENT).
setParameter('using', locator.using).
setParameter('value', locator.value);
id = this.schedule_(command, 'WebElement.findElement(' + locator + ')');
}
return new webdriver.WebElement(this.driver_, id);
};
/**
* Schedules a command to test if there is at least one descendant of this
* element that matches the given search criteria.
*
* @param {!(webdriver.Locator|webdriver.By.Hash|Function)} locator The
* locator strategy to use when searching for the element.
* @return {!webdriver.promise.Promise.<boolean>} A promise that will be
* resolved with whether an element could be located on the page.
*/
webdriver.WebElement.prototype.isElementPresent = function(locator) {
return this.findElements(locator).then(function(result) {
return !!result.length;
});
};
/**
* Schedules a command to find all of the descendants of this element that
* match the given search criteria.
*
* @param {!(webdriver.Locator|webdriver.By.Hash|Function)} locator The
* locator strategy to use when searching for the elements.
* @return {!webdriver.promise.Promise.<!Array.<!webdriver.WebElement>>} A
* promise that will resolve to an array of WebElements.
*/
webdriver.WebElement.prototype.findElements = function(locator) {
locator = webdriver.Locator.checkLocator(locator);
if (goog.isFunction(locator)) {
return this.driver_.findElementsInternal_(locator, this);
} else {
var command = new webdriver.Command(
webdriver.CommandName.FIND_CHILD_ELEMENTS).
setParameter('using', locator.using).
setParameter('value', locator.value);
return this.schedule_(command, 'WebElement.findElements(' + locator + ')');
}
};
/**
* Schedules a command to click on this element.
* @return {!webdriver.promise.Promise} A promise that will be resolved when
* the click command has completed.
*/
webdriver.WebElement.prototype.click = function() {
return this.schedule_(
new webdriver.Command(webdriver.CommandName.CLICK_ELEMENT),
'WebElement.click()');
};
/**
* Schedules a command to type a sequence on the DOM element represented by this
* instance.
* <p/>
* Modifier keys (SHIFT, CONTROL, ALT, META) are stateful; once a modifier is
* processed in the keysequence, that key state is toggled until one of the
* following occurs:
* <ul>
* <li>The modifier key is encountered again in the sequence. At this point the
* state of the key is toggled (along with the appropriate keyup/down events).
* </li>
* <li>The {@code webdriver.Key.NULL} key is encountered in the sequence. When
* this key is encountered, all modifier keys current in the down state are
* released (with accompanying keyup events). The NULL key can be used to
* simulate common keyboard shortcuts:
* <code><pre>
* element.sendKeys("text was",
* webdriver.Key.CONTROL, "a", webdriver.Key.NULL,
* "now text is");
* // Alternatively:
* element.sendKeys("text was",
* webdriver.Key.chord(webdriver.Key.CONTROL, "a"),
* "now text is");
* </pre></code></li>
* <li>The end of the keysequence is encountered. When there are no more keys
* to type, all depressed modifier keys are released (with accompanying keyup
* events).
* </li>
* </ul>
* <strong>Note:</strong> On browsers where native keyboard events are not yet
* supported (e.g. Firefox on OS X), key events will be synthesized. Special
* punctionation keys will be synthesized according to a standard QWERTY en-us
* keyboard layout.
*
* @param {...string} var_args The sequence of keys to
* type. All arguments will be joined into a single sequence (var_args is
* permitted for convenience).
* @return {!webdriver.promise.Promise} A promise that will be resolved when all
* keys have been typed.
*/
webdriver.WebElement.prototype.sendKeys = function(var_args) {
// Coerce every argument to a string. This protects us from users that
// ignore the jsdoc and give us a number (which ends up causing problems on
// the server, which requires strings).
var keys = webdriver.promise.fullyResolved(goog.array.slice(arguments, 0)).
then(function(args) {
return goog.array.map(goog.array.slice(args, 0), function(key) {
return key + '';
});
});
return this.schedule_(
new webdriver.Command(webdriver.CommandName.SEND_KEYS_TO_ELEMENT).
setParameter('value', keys),
'WebElement.sendKeys(' + keys + ')');
};
/**
* Schedules a command to query for the tag/node name of this element.
* @return {!webdriver.promise.Promise} A promise that will be resolved with the
* element's tag name.
*/
webdriver.WebElement.prototype.getTagName = function() {
return this.schedule_(
new webdriver.Command(webdriver.CommandName.GET_ELEMENT_TAG_NAME),
'WebElement.getTagName()');
};
/**
* Schedules a command to query for the computed style of the element
* represented by this instance. If the element inherits the named style from
* its parent, the parent will be queried for its value. Where possible, color
* values will be converted to their hex representation (e.g. #00ff00 instead of
* rgb(0, 255, 0)).
* <p/>
* <em>Warning:</em> the value returned will be as the browser interprets it, so
* it may be tricky to form a proper assertion.
*
* @param {string} cssStyleProperty The name of the CSS style property to look
* up.
* @return {!webdriver.promise.Promise} A promise that will be resolved with the
* requested CSS value.
*/
webdriver.WebElement.prototype.getCssValue = function(cssStyleProperty) {
var name = webdriver.CommandName.GET_ELEMENT_VALUE_OF_CSS_PROPERTY;
return this.schedule_(
new webdriver.Command(name).
setParameter('propertyName', cssStyleProperty),
'WebElement.getCssValue(' + cssStyleProperty + ')');
};
/**
* Schedules a command to query for the value of the given attribute of the
* element. Will return the current value, even if it has been modified after
* the page has been loaded. More exactly, this method will return the value of
* the given attribute, unless that attribute is not present, in which case the
* value of the property with the same name is returned. If neither value is
* set, null is returned (for example, the "value" property of a textarea
* element). The "style" attribute is converted as best can be to a
* text representation with a trailing semi-colon. The following are deemed to
* be "boolean" attributes and will return either "true" or null:
*
* <p>async, autofocus, autoplay, checked, compact, complete, controls, declare,
* defaultchecked, defaultselected, defer, disabled, draggable, ended,
* formnovalidate, hidden, indeterminate, iscontenteditable, ismap, itemscope,
* loop, multiple, muted, nohref, noresize, noshade, novalidate, nowrap, open,
* paused, pubdate, readonly, required, reversed, scoped, seamless, seeking,
* selected, spellcheck, truespeed, willvalidate
*
* <p>Finally, the following commonly mis-capitalized attribute/property names
* are evaluated as expected:
* <ul>
* <li>"class"
* <li>"readonly"
* </ul>
* @param {string} attributeName The name of the attribute to query.
* @return {!webdriver.promise.Promise} A promise that will be resolved with the
* attribute's value. The returned value will always be either a string or
* null.
*/
webdriver.WebElement.prototype.getAttribute = function(attributeName) {
return this.schedule_(
new webdriver.Command(webdriver.CommandName.GET_ELEMENT_ATTRIBUTE).
setParameter('name', attributeName),
'WebElement.getAttribute(' + attributeName + ')');
};
/**
* Get the visible (i.e. not hidden by CSS) innerText of this element, including
* sub-elements, without any leading or trailing whitespace.
* @return {!webdriver.promise.Promise} A promise that will be resolved with the
* element's visible text.
*/
webdriver.WebElement.prototype.getText = function() {
return this.schedule_(
new webdriver.Command(webdriver.CommandName.GET_ELEMENT_TEXT),
'WebElement.getText()');
};
/**
* Schedules a command to compute the size of this element's bounding box, in
* pixels.
* @return {!webdriver.promise.Promise} A promise that will be resolved with the
* element's size as a {@code {width:number, height:number}} object.
*/
webdriver.WebElement.prototype.getSize = function() {
return this.schedule_(
new webdriver.Command(webdriver.CommandName.GET_ELEMENT_SIZE),
'WebElement.getSize()');
};
/**
* Schedules a command to compute the location of this element in page space.
* @return {!webdriver.promise.Promise} A promise that will be resolved to the
* element's location as a {@code {x:number, y:number}} object.
*/
webdriver.WebElement.prototype.getLocation = function() {
return this.schedule_(
new webdriver.Command(webdriver.CommandName.GET_ELEMENT_LOCATION),
'WebElement.getLocation()');
};
/**
* Schedules a command to query whether the DOM element represented by this
* instance is enabled, as dicted by the {@code disabled} attribute.
* @return {!webdriver.promise.Promise} A promise that will be resolved with
* whether this element is currently enabled.
*/
webdriver.WebElement.prototype.isEnabled = function() {
return this.schedule_(
new webdriver.Command(webdriver.CommandName.IS_ELEMENT_ENABLED),
'WebElement.isEnabled()');
};
/**
* Schedules a command to query whether this element is selected.
* @return {!webdriver.promise.Promise} A promise that will be resolved with
* whether this element is currently selected.
*/
webdriver.WebElement.prototype.isSelected = function() {
return this.schedule_(
new webdriver.Command(webdriver.CommandName.IS_ELEMENT_SELECTED),
'WebElement.isSelected()');
};
/**
* Schedules a command to submit the form containing this element (or this
* element if it is a FORM element). This command is a no-op if the element is
* not contained in a form.
* @return {!webdriver.promise.Promise} A promise that will be resolved when
* the form has been submitted.
*/
webdriver.WebElement.prototype.submit = function() {
return this.schedule_(
new webdriver.Command(webdriver.CommandName.SUBMIT_ELEMENT),
'WebElement.submit()');
};
/**
* Schedules a command to clear the {@code value} of this element. This command
* has no effect if the underlying DOM element is neither a text INPUT element
* nor a TEXTAREA element.
* @return {!webdriver.promise.Promise} A promise that will be resolved when
* the element has been cleared.
*/
webdriver.WebElement.prototype.clear = function() {
return this.schedule_(
new webdriver.Command(webdriver.CommandName.CLEAR_ELEMENT),
'WebElement.clear()');
};
/**
* Schedules a command to test whether this element is currently displayed.
* @return {!webdriver.promise.Promise} A promise that will be resolved with
* whether this element is currently visible on the page.
*/
webdriver.WebElement.prototype.isDisplayed = function() {
return this.schedule_(
new webdriver.Command(webdriver.CommandName.IS_ELEMENT_DISPLAYED),
'WebElement.isDisplayed()');
};
/**
* Schedules a command to retrieve the outer HTML of this element.
* @return {!webdriver.promise.Promise} A promise that will be resolved with
* the element's outer HTML.
*/
webdriver.WebElement.prototype.getOuterHtml = function() {
return this.driver_.executeScript(function() {
var element = arguments[0];
if ('outerHTML' in element) {
return element.outerHTML;
} else {
var div = element.ownerDocument.createElement('div');
div.appendChild(element.cloneNode(true));
return div.innerHTML;
}
}, this);
};
/**
* Schedules a command to retrieve the inner HTML of this element.
* @return {!webdriver.promise.Promise} A promise that will be resolved with the
* element's inner HTML.
*/
webdriver.WebElement.prototype.getInnerHtml = function() {
return this.driver_.executeScript('return arguments[0].innerHTML', this);
};
/**
* Represents a modal dialog such as {@code alert}, {@code confirm}, or
* {@code prompt}. Provides functions to retrieve the message displayed with
* the alert, accept or dismiss the alert, and set the response text (in the
* case of {@code prompt}).
* @param {!webdriver.WebDriver} driver The driver controlling the browser this
* alert is attached to.
* @param {!(string|webdriver.promise.Promise)} text Either the message text
* displayed with this alert, or a promise that will be resolved to said
* text.
* @constructor
* @extends {webdriver.promise.Deferred}
*/
webdriver.Alert = function(driver, text) {
goog.base(this, null, driver.controlFlow());
/** @private {!webdriver.WebDriver} */
this.driver_ = driver;
// This class is responsible for resolving itself; delete the resolve and
// reject methods so they may not be accessed by consumers of this class.
var fulfill = goog.partial(this.fulfill, this);
var reject = this.reject;
delete this.promise;
delete this.fulfill;
delete this.reject;
/** @private {!webdriver.promise.Promise} */
this.text_ = webdriver.promise.when(text);
// Make sure this instance is resolved when its displayed text is.
this.text_.then(fulfill, reject);
};
goog.inherits(webdriver.Alert, webdriver.promise.Deferred);
/**
* Retrieves the message text displayed with this alert. For instance, if the
* alert were opened with alert("hello"), then this would return "hello".
* @return {!webdriver.promise.Promise} A promise that will be resolved to the
* text displayed with this alert.
*/
webdriver.Alert.prototype.getText = function() {
return this.text_;
};
/**
* Accepts this alert.
* @return {!webdriver.promise.Promise} A promise that will be resolved when
* this command has completed.
*/
webdriver.Alert.prototype.accept = function() {
return this.driver_.schedule(
new webdriver.Command(webdriver.CommandName.ACCEPT_ALERT),
'WebDriver.switchTo().alert().accept()');
};
/**
* Dismisses this alert.
* @return {!webdriver.promise.Promise} A promise that will be resolved when
* this command has completed.
*/
webdriver.Alert.prototype.dismiss = function() {
return this.driver_.schedule(
new webdriver.Command(webdriver.CommandName.DISMISS_ALERT),
'WebDriver.switchTo().alert().dismiss()');
};
/**
* Sets the response text on this alert. This command will return an error if
* the underlying alert does not support response text (e.g. window.alert and
* window.confirm).
* @param {string} text The text to set.
* @return {!webdriver.promise.Promise} A promise that will be resolved when
* this command has completed.
*/
webdriver.Alert.prototype.sendKeys = function(text) {
return this.driver_.schedule(
new webdriver.Command(webdriver.CommandName.SET_ALERT_TEXT).
setParameter('text', text),
'WebDriver.switchTo().alert().sendKeys(' + text + ')');
};
/**
* An error returned to indicate that there is an unhandled modal dialog on the
* current page.
* @param {string} message The error message.
* @param {!webdriver.Alert} alert The alert handle.
* @constructor
* @extends {bot.Error}
*/
webdriver.UnhandledAlertError = function(message, alert) {
goog.base(this, bot.ErrorCode.MODAL_DIALOG_OPENED, message);
/** @private {!webdriver.Alert} */
this.alert_ = alert;
};
goog.inherits(webdriver.UnhandledAlertError, bot.Error);
/**
* @return {!webdriver.Alert} The open alert.
*/
webdriver.UnhandledAlertError.prototype.getAlert = function() {
return this.alert_;
};
| 1 | 10,951 | Will you link to {!webdriver.promise.Promise} here? | SeleniumHQ-selenium | py |
@@ -45,6 +45,7 @@ Workshops::Application.routes.draw do
resources :licenses, only: [:create]
end
get '/products/:id/purchases/:lookup' => redirect("/purchases/%{lookup}")
+ get '/purchases/:lookup' => 'pages#show', id: 'purchase-show'
resources :books, only: :show, controller: 'products' do
resources :licenses, only: [:create] | 1 | Workshops::Application.routes.draw do
use_doorkeeper
mount RailsAdmin::Engine => '/admin', :as => 'admin'
root to: 'homes#show'
get '/api/v1/me.json' => 'api/v1/users#show', as: :resource_owner
namespace :api do
namespace :v1 do
resources :completions, only: [:index, :show, :create, :destroy]
end
end
namespace :teams do
resources :invitations, only: [:create] do
resources :acceptances, only: [:new, :create]
end
resource :team, only: :edit
end
get '/pages/tmux' => redirect('https://www.youtube.com/watch?v=CKC8Ph-s2F4')
if Rails.env.staging? || Rails.env.production?
get '/products/:id' => redirect('/workshops/18-test-driven-rails'),
constraints: { id: /(10|12).*/ }
get '/products/:id' => redirect('/workshops/19-design-for-developers'),
constraints: { id: /(9|11).*/ }
get '/products/:id' => redirect('https://www.youtube.com/watch?v=CKC8Ph-s2F4'),
constraints: { id: /(4).*/ }
get '/products/14' => redirect('/prime')
get '/products/14-prime' => redirect('/prime')
end
resource :session, controller: 'sessions'
get '/courses.json' => redirect('/workshops.json')
get '/courses/:id' => redirect('/workshops/%{id}')
resources :workshops, only: [:show] do
resources :licenses, only: [:create]
end
resources :products, only: [:index, :show] do
resources :licenses, only: [:create]
end
get '/products/:id/purchases/:lookup' => redirect("/purchases/%{lookup}")
resources :books, only: :show, controller: 'products' do
resources :licenses, only: [:create]
end
resources :screencasts, only: :show, controller: 'products' do
resources :licenses, only: [:create]
end
resources :shows, only: :show, controller: 'products' do
resources :licenses, only: [:create]
end
get '/the-weekly-iteration' => 'weekly_iterations#show', as: :weekly_iteration
resources :licenses, only: [:index]
resources :videos, only: [:show]
namespace :subscriber do
resources :invoices, only: [:index, :show]
resource :cancellation, only: [:new, :create]
resource :downgrade, only: :create
end
resource :subscription, only: [:new, :edit, :update]
resource :credit_card, only: [:update]
scope ':plan' do
resources :checkouts, only: [:new, :create]
resources :redemptions, only: [:new]
end
get '/podcast.xml' => redirect('http://podcasts.thoughtbot.com/giantrobots.xml')
get '/podcast' => redirect('http://podcasts.thoughtbot.com/giantrobots')
get '/podcast/articles' => 'articles#index', id: 'podcast'
get '/podcast/:id' => redirect("http://podcasts.thoughtbot.com/giantrobots/%{id}")
get '/podcasts' => redirect('http://podcasts.thoughtbot.com/giantrobots')
get '/podcasts/:id' => redirect("http://podcasts.thoughtbot.com/giantrobots/%{id}")
get '/giantrobots.xml' => redirect('http://podcasts.thoughtbot.com/giantrobots.xml')
get '/giantrobots' => redirect('http://podcasts.thoughtbot.com/giantrobots')
get '/giantrobots/:id.mp3' => redirect("http://podcasts.thoughtbot.com/giantrobots/%{id}.mp3")
get '/giantrobots/:id' => redirect("http://podcasts.thoughtbot.com/giantrobots/%{id}")
get '/buildphase.xml' => redirect('http://podcasts.thoughtbot.com/buildphase.xml')
get '/buildphase' => redirect('http://podcasts.thoughtbot.com/buildphase')
get '/buildphase/:id.mp3' => redirect("http://podcasts.thoughtbot.com/buildphase/%{id}.mp3")
get '/buildphase/:id' => redirect("http://podcasts.thoughtbot.com/buildphase/%{id}")
resources :design_for_developers_resources, path: 'design-for-developers-resources', only: [:index, :show]
resources :test_driven_rails_resources, path: 'test-driven-rails-resources', only: [:index]
get '/d4d-resources' => redirect('/design-for-developers-resources')
resources :topics, only: :index, path: 'trails'
get '/auth/:provider/callback', to: 'auth_callbacks#create'
get "/pages/*id" => 'pages#show', format: false
get '/prime' => 'promoted_catalogs#show', as: :prime
get '/privacy' => 'pages#show', as: :privacy, id: 'privacy'
get '/terms' => 'pages#show', as: :terms, id: 'terms'
get '/directions' => "pages#show", as: :directions, id: "directions"
get '/group-training' => "pages#show", as: :group_training, id: "group-training"
get '/humans-present/oss' => redirect('https://www.youtube.com/watch?v=VMBhumlUP-A')
get '/backbone.js' => redirect('/backbone')
get "/backbone-js-on-rails" => redirect("/products/1-backbone-js-on-rails")
get "/geocoding-on-rails" => redirect("/products/22-geocoding-on-rails")
get '/geocodingonrails' => redirect('/products/22-geocoding-on-rails')
get "/ios-on-rails" => redirect("/products/25-ios-on-rails-beta")
get "/ruby-science" => redirect("/products/13-ruby-science")
get '/gettingstartedwithios' => redirect('/workshops/24-getting-started-with-ios-development?utm_source=podcast')
get '/5by5' => redirect('/workshops/19-design-for-developers?utm_source=5by5')
get '/rubyist-booster-shot' => "pages#show", as: :rubyist_booster_shot, id: "rubyist-booster-shot"
get '/live' => redirect(OfficeHours.url)
patch '/my_account' => 'users#update', as: 'edit_my_account'
get '/my_account' => 'users#edit', as: 'my_account'
resources :users, controller: 'users' do
resources :notes, only: [:create, :edit, :update]
resource :password, :controller => 'passwords', :only => [:create, :edit, :update]
end
get '/sign_up' => 'users#new', as: 'sign_up_app'
get '/sign_in' => 'sessions#new', as: 'sign_in_app'
resources :passwords, controller: 'passwords', :only => [:create, :new]
resource :dashboard, only: :show
mount StripeEvent::Engine, at: 'stripe-webhook'
get ':id' => 'topics#show', as: :topic
get '/:id/articles' => redirect('http://robots.thoughtbot.com/tags/%{id}')
end
| 1 | 10,435 | Prefer double-quoted strings unless you need single quotes to avoid extra backslashes for escaping. | thoughtbot-upcase | rb |
@@ -96,11 +96,10 @@ public class DataService extends IntentService {
ObjectivesPlugin.bgIsAvailableInNS = true;
ObjectivesPlugin.saveProgress();
} else if (isNSProfile && Intents.ACTION_NEW_PROFILE.equals(action)){
- // always handle Profili if NSProfile is enabled without looking at nsUploadOnly
+ // always handle Profile if NSProfile is enabled without looking at nsUploadOnly
handleNewDataFromNSClient(intent);
} else if (!nsUploadOnly &&
- (Intents.ACTION_NEW_PROFILE.equals(action) ||
- Intents.ACTION_NEW_TREATMENT.equals(action) ||
+ (Intents.ACTION_NEW_TREATMENT.equals(action) ||
Intents.ACTION_CHANGED_TREATMENT.equals(action) ||
Intents.ACTION_REMOVED_TREATMENT.equals(action) ||
Intents.ACTION_NEW_STATUS.equals(action) || | 1 | package info.nightscout.androidaps.Services;
import android.app.IntentService;
import android.content.Intent;
import android.content.SharedPreferences;
import android.os.Bundle;
import android.preference.PreferenceManager;
import android.provider.Telephony;
import android.support.annotation.Nullable;
import com.j256.ormlite.dao.Dao;
import com.j256.ormlite.stmt.PreparedQuery;
import com.j256.ormlite.stmt.QueryBuilder;
import com.j256.ormlite.stmt.Where;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.sql.SQLException;
import java.util.Date;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import info.nightscout.androidaps.Config;
import info.nightscout.androidaps.Constants;
import info.nightscout.androidaps.MainApp;
import info.nightscout.androidaps.R;
import info.nightscout.androidaps.db.BgReading;
import info.nightscout.androidaps.db.DanaRHistoryRecord;
import info.nightscout.androidaps.db.Treatment;
import info.nightscout.androidaps.events.EventNewBG;
import info.nightscout.androidaps.events.EventNewBasalProfile;
import info.nightscout.androidaps.events.EventTreatmentChange;
import info.nightscout.androidaps.interfaces.PumpInterface;
import info.nightscout.androidaps.plugins.ConfigBuilder.ConfigBuilderPlugin;
import info.nightscout.androidaps.plugins.DanaR.History.DanaRNSHistorySync;
import info.nightscout.androidaps.plugins.NSProfileViewer.NSProfileViewerPlugin;
import info.nightscout.androidaps.plugins.Objectives.ObjectivesPlugin;
import info.nightscout.androidaps.plugins.Overview.OverviewPlugin;
import info.nightscout.androidaps.plugins.SmsCommunicator.SmsCommunicatorPlugin;
import info.nightscout.androidaps.plugins.SmsCommunicator.events.EventNewSMS;
import info.nightscout.androidaps.plugins.SourceNSClient.SourceNSClientPlugin;
import info.nightscout.androidaps.plugins.SourceXdrip.SourceXdripPlugin;
import info.nightscout.androidaps.receivers.DataReceiver;
import info.nightscout.client.data.NSProfile;
import info.nightscout.client.data.NSSgv;
import info.nightscout.utils.ToastUtils;
public class DataService extends IntentService {
private static Logger log = LoggerFactory.getLogger(DataService.class);
boolean xDripEnabled = false;
boolean nsClientEnabled = true;
public DataService() {
super("DataService");
registerBus();
}
@Override
protected void onHandleIntent(final Intent intent) {
if (Config.logFunctionCalls)
log.debug("onHandleIntent " + intent);
if (ConfigBuilderPlugin.getActiveBgSource().getClass().equals(SourceXdripPlugin.class)) {
xDripEnabled = true;
nsClientEnabled = false;
} else if (ConfigBuilderPlugin.getActiveBgSource().getClass().equals(SourceNSClientPlugin.class)) {
xDripEnabled = false;
nsClientEnabled = true;
}
boolean isNSProfile = ConfigBuilderPlugin.getActiveProfile().getClass().equals(NSProfileViewerPlugin.class);
SharedPreferences SP = PreferenceManager.getDefaultSharedPreferences(getApplicationContext());
boolean nsUploadOnly = SP.getBoolean("ns_upload_only", false);
if (intent != null) {
final String action = intent.getAction();
if (Intents.ACTION_NEW_BG_ESTIMATE.equals(action)) {
if (xDripEnabled) {
handleNewDataFromXDrip(intent);
}
} else if (Intents.ACTION_NEW_SGV.equals(action)) {
// always handle SGV if NS-Client is the source
if (nsClientEnabled) {
handleNewDataFromNSClient(intent);
}
// Objectives 0
ObjectivesPlugin.bgIsAvailableInNS = true;
ObjectivesPlugin.saveProgress();
} else if (isNSProfile && Intents.ACTION_NEW_PROFILE.equals(action)){
// always handle Profili if NSProfile is enabled without looking at nsUploadOnly
handleNewDataFromNSClient(intent);
} else if (!nsUploadOnly &&
(Intents.ACTION_NEW_PROFILE.equals(action) ||
Intents.ACTION_NEW_TREATMENT.equals(action) ||
Intents.ACTION_CHANGED_TREATMENT.equals(action) ||
Intents.ACTION_REMOVED_TREATMENT.equals(action) ||
Intents.ACTION_NEW_STATUS.equals(action) ||
Intents.ACTION_NEW_DEVICESTATUS.equals(action) ||
Intents.ACTION_NEW_CAL.equals(action) ||
Intents.ACTION_NEW_MBG.equals(action))
) {
handleNewDataFromNSClient(intent);
} else if (Telephony.Sms.Intents.SMS_RECEIVED_ACTION.equals(action)) {
handleNewSMS(intent);
}
}
if (Config.logFunctionCalls)
log.debug("onHandleIntent exit " + intent);
DataReceiver.completeWakefulIntent(intent);
}
/*
@Override
public int onStartCommand(Intent intent, int flags, int startId) {
super.onStartCommand(intent, flags, startId);
if (Config.logFunctionCalls)
log.debug("onStartCommand");
return START_STICKY;
}
*/
@Override
public void onDestroy() {
super.onDestroy();
MainApp.bus().unregister(this);
}
private void registerBus() {
try {
MainApp.bus().unregister(this);
} catch (RuntimeException x) {
// Ignore
}
MainApp.bus().register(this);
}
private void handleNewDataFromXDrip(Intent intent) {
Bundle bundle = intent.getExtras();
if (bundle == null) return;
BgReading bgReading = new BgReading();
bgReading.value = bundle.getDouble(Intents.EXTRA_BG_ESTIMATE);
bgReading.slope = bundle.getDouble(Intents.EXTRA_BG_SLOPE);
bgReading.battery_level = bundle.getInt(Intents.EXTRA_SENSOR_BATTERY);
bgReading.timeIndex = bundle.getLong(Intents.EXTRA_TIMESTAMP);
bgReading.raw = bundle.getDouble(Intents.EXTRA_RAW);
if (bgReading.timeIndex < new Date().getTime() - Constants.hoursToKeepInDatabase * 60 * 60 * 1000L) {
if (Config.logIncommingBG)
log.debug("Ignoring old XDRIPREC BG " + bgReading.toString());
return;
}
if (Config.logIncommingBG)
log.debug("XDRIPREC BG " + bgReading.toString());
try {
MainApp.getDbHelper().getDaoBgReadings().createIfNotExists(bgReading);
} catch (SQLException e) {
e.printStackTrace();
}
MainApp.bus().post(new EventNewBG());
}
private void handleNewDataFromNSClient(Intent intent) {
Bundle bundles = intent.getExtras();
if (bundles == null) return;
if (Config.logIncommingData)
log.debug("Got intent: " + intent.getAction());
if (intent.getAction().equals(Intents.ACTION_NEW_STATUS)) {
if (Config.logIncommingData)
log.debug("Received status: " + bundles);
if (bundles.containsKey("nsclientversioncode")) {
ConfigBuilderPlugin.nightscoutVersionCode = bundles.getInt("nightscoutversioncode"); // for ver 1.2.3 contains 10203
ConfigBuilderPlugin.nightscoutVersionName = bundles.getString("nightscoutversionname");
ConfigBuilderPlugin.nsClientVersionCode = bundles.getInt("nsclientversioncode"); // for ver 1.17 contains 117
ConfigBuilderPlugin.nsClientVersionName = bundles.getString("nsclientversionname");
log.debug("Got versions: NSClient: " + ConfigBuilderPlugin.nsClientVersionName + " Nightscout: " + ConfigBuilderPlugin.nightscoutVersionName);
if (ConfigBuilderPlugin.nsClientVersionCode < 118)
ToastUtils.showToastInUiThread(MainApp.instance().getApplicationContext(), MainApp.sResources.getString(R.string.unsupportedclientver));
} else {
ToastUtils.showToastInUiThread(MainApp.instance().getApplicationContext(), MainApp.sResources.getString(R.string.unsupportedclientver));
}
if (bundles.containsKey("status")) {
try {
JSONObject statusJson = new JSONObject(bundles.getString("status"));
if (statusJson.has("settings")) {
JSONObject settings = statusJson.getJSONObject("settings");
if (settings.has("thresholds")) {
JSONObject thresholds = settings.getJSONObject("thresholds");
if (thresholds.has("bgTargetTop")) {
OverviewPlugin.bgTargetHigh = thresholds.getDouble("bgTargetTop");
}
if (thresholds.has("bgTargetBottom")) {
OverviewPlugin.bgTargetLow = thresholds.getDouble("bgTargetBottom");
}
}
}
} catch (JSONException e) {
e.printStackTrace();
}
}
}
if (intent.getAction().equals(Intents.ACTION_NEW_DEVICESTATUS)) {
try {
if (bundles.containsKey("devicestatus")) {
String devicestatusesstring = bundles.getString("devicestatus");
JSONObject devicestatusJson = new JSONObject(bundles.getString("devicestatus"));
if (devicestatusJson.has("pump")) {
// Objectives 0
ObjectivesPlugin.pumpStatusIsAvailableInNS = true;
ObjectivesPlugin.saveProgress();
}
}
if (bundles.containsKey("devicestatuses")) {
String devicestatusesstring = bundles.getString("devicestatuses");
JSONArray jsonArray = new JSONArray(devicestatusesstring);
if (jsonArray.length() > 0) {
JSONObject devicestatusJson = jsonArray.getJSONObject(0);
if (devicestatusJson.has("pump")) {
// Objectives 0
ObjectivesPlugin.pumpStatusIsAvailableInNS = true;
ObjectivesPlugin.saveProgress();
}
}
}
} catch (Exception e) {
e.printStackTrace();
}
}
// Handle profile
if (intent.getAction().equals(Intents.ACTION_NEW_PROFILE)) {
try {
String activeProfile = bundles.getString("activeprofile");
String profile = bundles.getString("profile");
NSProfile nsProfile = new NSProfile(new JSONObject(profile), activeProfile);
if (MainApp.getConfigBuilder() == null) {
log.error("Config builder not ready on receive profile");
return;
}
PumpInterface pump = MainApp.getConfigBuilder();
if (pump != null) {
SharedPreferences SP = PreferenceManager.getDefaultSharedPreferences(getApplicationContext());
if (SP.getBoolean("syncprofiletopump", false))
pump.setNewBasalProfile(nsProfile);
} else {
log.error("No active pump selected");
}
if (Config.logIncommingData)
log.debug("Received profile: " + activeProfile + " " + profile);
MainApp.bus().post(new EventNewBasalProfile(nsProfile));
} catch (JSONException e) {
e.printStackTrace();
}
}
if (intent.getAction().equals(Intents.ACTION_NEW_TREATMENT)) {
try {
if (bundles.containsKey("treatment")) {
String trstring = bundles.getString("treatment");
handleAddedTreatment(trstring);
}
if (bundles.containsKey("treatments")) {
String trstring = bundles.getString("treatments");
JSONArray jsonArray = new JSONArray(trstring);
for (int i = 0; i < jsonArray.length(); i++) {
JSONObject trJson = jsonArray.getJSONObject(i);
String trstr = trJson.toString();
handleAddedTreatment(trstr);
}
}
scheduleTreatmentChange();
} catch (Exception e) {
e.printStackTrace();
}
}
if (intent.getAction().equals(Intents.ACTION_CHANGED_TREATMENT)) {
try {
if (bundles.containsKey("treatment")) {
String trstring = bundles.getString("treatment");
handleChangedTreatment(trstring);
}
if (bundles.containsKey("treatments")) {
String trstring = bundles.getString("treatments");
JSONArray jsonArray = new JSONArray(trstring);
for (int i = 0; i < jsonArray.length(); i++) {
JSONObject trJson = jsonArray.getJSONObject(i);
String trstr = trJson.toString();
handleChangedTreatment(trstr);
}
}
scheduleTreatmentChange();
} catch (Exception e) {
e.printStackTrace();
}
}
if (intent.getAction().equals(Intents.ACTION_REMOVED_TREATMENT)) {
try {
if (bundles.containsKey("treatment")) {
String trstring = bundles.getString("treatment");
JSONObject trJson = new JSONObject(trstring);
String _id = trJson.getString("_id");
removeTreatmentFromDb(_id);
}
if (bundles.containsKey("treatments")) {
String trstring = bundles.getString("treatments");
JSONArray jsonArray = new JSONArray(trstring);
for (int i = 0; i < jsonArray.length(); i++) {
JSONObject trJson = jsonArray.getJSONObject(i);
String _id = trJson.getString("_id");
removeTreatmentFromDb(_id);
}
}
scheduleTreatmentChange();
} catch (Exception e) {
e.printStackTrace();
}
}
if (intent.getAction().equals(Intents.ACTION_NEW_SGV)) {
try {
if (bundles.containsKey("sgv")) {
String sgvstring = bundles.getString("sgv");
JSONObject sgvJson = new JSONObject(sgvstring);
NSSgv nsSgv = new NSSgv(sgvJson);
BgReading bgReading = new BgReading(nsSgv);
if (bgReading.timeIndex < new Date().getTime() - Constants.hoursToKeepInDatabase * 60 * 60 * 1000l) {
if (Config.logIncommingData)
log.debug("Ignoring old BG: " + bgReading.toString());
return;
}
MainApp.getDbHelper().getDaoBgReadings().createIfNotExists(bgReading);
if (Config.logIncommingData)
log.debug("ADD: Stored new BG: " + bgReading.toString());
}
if (bundles.containsKey("sgvs")) {
String sgvstring = bundles.getString("sgvs");
JSONArray jsonArray = new JSONArray(sgvstring);
for (int i = 0; i < jsonArray.length(); i++) {
JSONObject sgvJson = jsonArray.getJSONObject(i);
NSSgv nsSgv = new NSSgv(sgvJson);
BgReading bgReading = new BgReading(nsSgv);
if (bgReading.timeIndex < new Date().getTime() - Constants.hoursToKeepInDatabase * 60 * 60 * 1000l) {
if (Config.logIncommingData)
log.debug("Ignoring old BG: " + bgReading.toString());
} else {
MainApp.getDbHelper().getDaoBgReadings().createIfNotExists(bgReading);
if (Config.logIncommingData)
log.debug("ADD: Stored new BG: " + bgReading.toString());
}
}
}
} catch (Exception e) {
e.printStackTrace();
}
MainApp.bus().post(new EventNewBG());
}
if (intent.getAction().equals(Intents.ACTION_NEW_MBG)) {
log.error("Not implemented yet"); // TODO implemeng MBGS
}
}
private void handleAddedTreatment(String trstring) throws JSONException, SQLException {
JSONObject trJson = new JSONObject(trstring);
handleDanaRHistoryRecords(trJson); // update record _id in history
if (!trJson.has("insulin") && !trJson.has("carbs")) {
if (Config.logIncommingData)
log.debug("ADD: Uninterested treatment: " + trstring);
return;
}
Treatment stored = null;
String _id = trJson.getString("_id");
if (trJson.has("timeIndex")) {
if (Config.logIncommingData)
log.debug("ADD: timeIndex found: " + trstring);
stored = findByTimeIndex(trJson.getLong("timeIndex"));
} else {
stored = findById(_id);
}
if (stored != null) {
if (Config.logIncommingData)
log.debug("ADD: Existing treatment: " + trstring);
if (trJson.has("timeIndex")) {
stored._id = _id;
int updated = MainApp.getDbHelper().getDaoTreatments().update(stored);
if (Config.logIncommingData)
log.debug("Records updated: " + updated);
}
} else {
if (Config.logIncommingData)
log.debug("ADD: New treatment: " + trstring);
Treatment treatment = new Treatment();
treatment._id = _id;
treatment.carbs = trJson.has("carbs") ? trJson.getDouble("carbs") : 0;
treatment.insulin = trJson.has("insulin") ? trJson.getDouble("insulin") : 0d;
treatment.created_at = new Date(trJson.getLong("mills"));
if (trJson.has("eventType")) {
treatment.mealBolus = true;
if (trJson.get("eventType").equals("Correction Bolus")) treatment.mealBolus = false;
if (trJson.get("eventType").equals("Bolus Wizard") && treatment.carbs <= 0) treatment.mealBolus = false;
}
treatment.setTimeIndex(treatment.getTimeIndex());
try {
MainApp.getDbHelper().getDaoTreatments().createOrUpdate(treatment);
if (Config.logIncommingData)
log.debug("ADD: Stored treatment: " + treatment.log());
} catch (SQLException e) {
e.printStackTrace();
}
}
}
private void handleChangedTreatment(String trstring) throws JSONException, SQLException {
JSONObject trJson = new JSONObject(trstring);
handleDanaRHistoryRecords(trJson); // update record _id in history
if (!trJson.has("insulin") && !trJson.has("carbs")) {
if (Config.logIncommingData)
log.debug("CHANGE: Uninterested treatment: " + trstring);
return;
}
String _id = trJson.getString("_id");
Treatment stored;
if (trJson.has("timeIndex")) {
if (Config.logIncommingData)
log.debug("ADD: timeIndex found: " + trstring);
stored = findByTimeIndex(trJson.getLong("timeIndex"));
} else {
stored = findById(_id);
}
if (stored != null) {
if (Config.logIncommingData)
log.debug("CHANGE: Removing old: " + trstring);
removeTreatmentFromDb(_id);
}
if (Config.logIncommingData)
log.debug("CHANGE: Adding new treatment: " + trstring);
Treatment treatment = new Treatment();
treatment._id = _id;
treatment.carbs = trJson.has("carbs") ? trJson.getDouble("carbs") : 0;
treatment.insulin = trJson.has("insulin") ? trJson.getDouble("insulin") : 0d;
//treatment.created_at = DateUtil.fromISODateString(trJson.getString("created_at"));
treatment.created_at = new Date(trJson.getLong("mills"));
if (trJson.has("eventType")) {
treatment.mealBolus = true;
if (trJson.get("eventType").equals("Correction Bolus")) treatment.mealBolus = false;
if (trJson.get("eventType").equals("Bolus Wizard") && treatment.carbs <= 0) treatment.mealBolus = false;
}
treatment.setTimeIndex(treatment.getTimeIndex());
try {
Dao.CreateOrUpdateStatus status = MainApp.getDbHelper().getDaoTreatments().createOrUpdate(treatment);
if (Config.logIncommingData)
log.debug("Records updated: " + status.getNumLinesChanged());
if (Config.logIncommingData)
log.debug("CHANGE: Stored treatment: " + treatment.log());
} catch (SQLException e) {
e.printStackTrace();
}
}
public void handleDanaRHistoryRecords(JSONObject trJson) throws JSONException, SQLException {
if (trJson.has(DanaRNSHistorySync.DANARSIGNATURE)) {
Dao<DanaRHistoryRecord, String> daoHistoryRecords = MainApp.getDbHelper().getDaoDanaRHistory();
QueryBuilder<DanaRHistoryRecord, String> queryBuilder = daoHistoryRecords.queryBuilder();
Where where = queryBuilder.where();
where.ge("bytes", trJson.get(DanaRNSHistorySync.DANARSIGNATURE));
PreparedQuery<DanaRHistoryRecord> preparedQuery = queryBuilder.prepare();
List<DanaRHistoryRecord> list = daoHistoryRecords.query(preparedQuery);
if (list.size() == 0) {
// Record does not exists. Ignore
} else if (list.size() == 1) {
DanaRHistoryRecord record = list.get(0);
if (record.get_id() == null || record.get_id() != trJson.getString("_id")) {
if (Config.logIncommingData)
log.debug("Updating _id in DanaR history database: " + trJson.getString("_id"));
record.set_id(trJson.getString("_id"));
daoHistoryRecords.update(record);
} else {
// already set
}
}
}
}
@Nullable
public static Treatment findById(String _id) {
try {
Dao<Treatment, Long> daoTreatments = MainApp.getDbHelper().getDaoTreatments();
QueryBuilder<Treatment, Long> queryBuilder = daoTreatments.queryBuilder();
Where where = queryBuilder.where();
where.eq("_id", _id);
queryBuilder.limit(10);
PreparedQuery<Treatment> preparedQuery = queryBuilder.prepare();
List<Treatment> trList = daoTreatments.query(preparedQuery);
if (trList.size() != 1) {
//log.debug("Treatment findById query size: " + trList.size());
return null;
} else {
//log.debug("Treatment findById found: " + trList.get(0).log());
return trList.get(0);
}
} catch (SQLException e) {
e.printStackTrace();
}
return null;
}
@Nullable
public static Treatment findByTimeIndex(Long timeIndex) {
try {
QueryBuilder<Treatment, String> qb = null;
Dao<Treatment, Long> daoTreatments = MainApp.getDbHelper().getDaoTreatments();
QueryBuilder<Treatment, Long> queryBuilder = daoTreatments.queryBuilder();
Where where = queryBuilder.where();
where.eq("timeIndex", timeIndex);
queryBuilder.limit(10);
PreparedQuery<Treatment> preparedQuery = queryBuilder.prepare();
List<Treatment> trList = daoTreatments.query(preparedQuery);
if (trList.size() != 1) {
log.debug("Treatment findByTimeIndex query size: " + trList.size());
return null;
} else {
log.debug("Treatment findByTimeIndex found: " + trList.get(0).log());
return trList.get(0);
}
} catch (SQLException e) {
e.printStackTrace();
}
return null;
}
private void removeTreatmentFromDb(String _id) throws SQLException {
Treatment stored = findById(_id);
if (stored != null) {
log.debug("REMOVE: Existing treatment (removing): " + _id);
int removed = MainApp.getDbHelper().getDaoTreatments().delete(stored);
if (Config.logIncommingData)
log.debug("Records removed: " + removed);
scheduleTreatmentChange();
} else {
log.debug("REMOVE: Not stored treatment (ignoring): " + _id);
}
}
private void handleNewSMS(Intent intent) {
Bundle bundle = intent.getExtras();
if (bundle == null) return;
MainApp.bus().post(new EventNewSMS(bundle));
}
public void scheduleTreatmentChange() {
MainApp.bus().post(new EventTreatmentChange());
}
}
| 1 | 29,309 | @AdrianLxM finally found some your bug too :) | MilosKozak-AndroidAPS | java |
@@ -1543,7 +1543,9 @@ void cvdescriptorset::ImageSamplerDescriptor::UpdateDrawState(layer_data *dev_da
if (iv_state) {
core_validation::AddCommandBufferBindingImageView(dev_data, cb_node, iv_state);
}
- SetImageViewLayout(dev_data, cb_node, image_view_, image_layout_);
+ if (image_view_) {
+ SetImageViewLayout(dev_data, cb_node, image_view_, image_layout_);
+ }
}
cvdescriptorset::ImageDescriptor::ImageDescriptor(const VkDescriptorType type) | 1 | /* Copyright (c) 2015-2018 The Khronos Group Inc.
* Copyright (c) 2015-2018 Valve Corporation
* Copyright (c) 2015-2018 LunarG, Inc.
* Copyright (C) 2015-2018 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: Tobin Ehlis <[email protected]>
* John Zulauf <[email protected]>
*/
// Allow use of STL min and max functions in Windows
#define NOMINMAX
#include "descriptor_sets.h"
#include "hash_vk_types.h"
#include "vk_enum_string_helper.h"
#include "vk_safe_struct.h"
#include "vk_typemap_helper.h"
#include "buffer_validation.h"
#include <sstream>
#include <algorithm>
#include <memory>
// ExtendedBinding collects a VkDescriptorSetLayoutBinding and any extended
// state that comes from a different array/structure so they can stay together
// while being sorted by binding number.
struct ExtendedBinding {
ExtendedBinding(const VkDescriptorSetLayoutBinding *l, VkDescriptorBindingFlagsEXT f) : layout_binding(l), binding_flags(f) {}
const VkDescriptorSetLayoutBinding *layout_binding;
VkDescriptorBindingFlagsEXT binding_flags;
};
struct BindingNumCmp {
bool operator()(const ExtendedBinding &a, const ExtendedBinding &b) const {
return a.layout_binding->binding < b.layout_binding->binding;
}
};
using DescriptorSetLayoutDef = cvdescriptorset::DescriptorSetLayoutDef;
using DescriptorSetLayoutId = cvdescriptorset::DescriptorSetLayoutId;
// Canonical dictionary of DescriptorSetLayoutDef (without any handle/device specific information)
cvdescriptorset::DescriptorSetLayoutDict descriptor_set_layout_dict;
DescriptorSetLayoutId GetCanonicalId(const VkDescriptorSetLayoutCreateInfo *p_create_info) {
return descriptor_set_layout_dict.look_up(DescriptorSetLayoutDef(p_create_info));
}
// Construct DescriptorSetLayout instance from given create info
// Proactively reserve and resize as possible, as the reallocation was visible in profiling
cvdescriptorset::DescriptorSetLayoutDef::DescriptorSetLayoutDef(const VkDescriptorSetLayoutCreateInfo *p_create_info)
: flags_(p_create_info->flags), binding_count_(0), descriptor_count_(0), dynamic_descriptor_count_(0) {
const auto *flags_create_info = lvl_find_in_chain<VkDescriptorSetLayoutBindingFlagsCreateInfoEXT>(p_create_info->pNext);
binding_type_stats_ = {0, 0, 0};
std::set<ExtendedBinding, BindingNumCmp> sorted_bindings;
const uint32_t input_bindings_count = p_create_info->bindingCount;
// Sort the input bindings in binding number order, eliminating duplicates
for (uint32_t i = 0; i < input_bindings_count; i++) {
VkDescriptorBindingFlagsEXT flags = 0;
if (flags_create_info && flags_create_info->bindingCount == p_create_info->bindingCount) {
flags = flags_create_info->pBindingFlags[i];
}
sorted_bindings.insert(ExtendedBinding(p_create_info->pBindings + i, flags));
}
// Store the create info in the sorted order from above
std::map<uint32_t, uint32_t> binding_to_dyn_count;
uint32_t index = 0;
binding_count_ = static_cast<uint32_t>(sorted_bindings.size());
bindings_.reserve(binding_count_);
binding_flags_.reserve(binding_count_);
binding_to_index_map_.reserve(binding_count_);
for (auto input_binding : sorted_bindings) {
// Add to binding and map, s.t. it is robust to invalid duplication of binding_num
const auto binding_num = input_binding.layout_binding->binding;
binding_to_index_map_[binding_num] = index++;
bindings_.emplace_back(input_binding.layout_binding);
auto &binding_info = bindings_.back();
binding_flags_.emplace_back(input_binding.binding_flags);
descriptor_count_ += binding_info.descriptorCount;
if (binding_info.descriptorCount > 0) {
non_empty_bindings_.insert(binding_num);
}
if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
binding_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
binding_to_dyn_count[binding_num] = binding_info.descriptorCount;
dynamic_descriptor_count_ += binding_info.descriptorCount;
binding_type_stats_.dynamic_buffer_count++;
} else if ((binding_info.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) ||
(binding_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)) {
binding_type_stats_.non_dynamic_buffer_count++;
} else {
binding_type_stats_.image_sampler_count++;
}
}
assert(bindings_.size() == binding_count_);
assert(binding_flags_.size() == binding_count_);
uint32_t global_index = 0;
binding_to_global_index_range_map_.reserve(binding_count_);
// Vector order is finalized so create maps of bindings to descriptors and descriptors to indices
for (uint32_t i = 0; i < binding_count_; ++i) {
auto binding_num = bindings_[i].binding;
auto final_index = global_index + bindings_[i].descriptorCount;
binding_to_global_index_range_map_[binding_num] = IndexRange(global_index, final_index);
if (final_index != global_index) {
global_start_to_index_map_[global_index] = i;
}
global_index = final_index;
}
// Now create dyn offset array mapping for any dynamic descriptors
uint32_t dyn_array_idx = 0;
binding_to_dynamic_array_idx_map_.reserve(binding_to_dyn_count.size());
for (const auto &bc_pair : binding_to_dyn_count) {
binding_to_dynamic_array_idx_map_[bc_pair.first] = dyn_array_idx;
dyn_array_idx += bc_pair.second;
}
}
size_t cvdescriptorset::DescriptorSetLayoutDef::hash() const {
hash_util::HashCombiner hc;
hc << flags_;
hc.Combine(bindings_);
hc.Combine(binding_flags_);
return hc.Value();
}
//
// Return valid index or "end" i.e. binding_count_;
// The asserts in "Get" are reduced to the set where no valid answer(like null or 0) could be given
// Common code for all binding lookups.
uint32_t cvdescriptorset::DescriptorSetLayoutDef::GetIndexFromBinding(uint32_t binding) const {
const auto &bi_itr = binding_to_index_map_.find(binding);
if (bi_itr != binding_to_index_map_.cend()) return bi_itr->second;
return GetBindingCount();
}
VkDescriptorSetLayoutBinding const *cvdescriptorset::DescriptorSetLayoutDef::GetDescriptorSetLayoutBindingPtrFromIndex(
const uint32_t index) const {
if (index >= bindings_.size()) return nullptr;
return bindings_[index].ptr();
}
// Return descriptorCount for given index, 0 if index is unavailable
uint32_t cvdescriptorset::DescriptorSetLayoutDef::GetDescriptorCountFromIndex(const uint32_t index) const {
if (index >= bindings_.size()) return 0;
return bindings_[index].descriptorCount;
}
// For the given index, return descriptorType
VkDescriptorType cvdescriptorset::DescriptorSetLayoutDef::GetTypeFromIndex(const uint32_t index) const {
assert(index < bindings_.size());
if (index < bindings_.size()) return bindings_[index].descriptorType;
return VK_DESCRIPTOR_TYPE_MAX_ENUM;
}
// For the given index, return stageFlags
VkShaderStageFlags cvdescriptorset::DescriptorSetLayoutDef::GetStageFlagsFromIndex(const uint32_t index) const {
assert(index < bindings_.size());
if (index < bindings_.size()) return bindings_[index].stageFlags;
return VkShaderStageFlags(0);
}
// Return binding flags for given index, 0 if index is unavailable
VkDescriptorBindingFlagsEXT cvdescriptorset::DescriptorSetLayoutDef::GetDescriptorBindingFlagsFromIndex(
const uint32_t index) const {
if (index >= binding_flags_.size()) return 0;
return binding_flags_[index];
}
// For the given global index, return index
uint32_t cvdescriptorset::DescriptorSetLayoutDef::GetIndexFromGlobalIndex(const uint32_t global_index) const {
auto start_it = global_start_to_index_map_.upper_bound(global_index);
uint32_t index = binding_count_;
assert(start_it != global_start_to_index_map_.cbegin());
if (start_it != global_start_to_index_map_.cbegin()) {
--start_it;
index = start_it->second;
#ifndef NDEBUG
const auto &range = GetGlobalIndexRangeFromBinding(bindings_[index].binding);
assert(range.start <= global_index && global_index < range.end);
#endif
}
return index;
}
// For the given binding, return the global index range
// As start and end are often needed in pairs, get both with a single hash lookup.
const cvdescriptorset::IndexRange &cvdescriptorset::DescriptorSetLayoutDef::GetGlobalIndexRangeFromBinding(
const uint32_t binding) const {
assert(binding_to_global_index_range_map_.count(binding));
// In error case max uint32_t so index is out of bounds to break ASAP
const static IndexRange kInvalidRange = {0xFFFFFFFF, 0xFFFFFFFF};
const auto &range_it = binding_to_global_index_range_map_.find(binding);
if (range_it != binding_to_global_index_range_map_.end()) {
return range_it->second;
}
return kInvalidRange;
}
// For given binding, return ptr to ImmutableSampler array
VkSampler const *cvdescriptorset::DescriptorSetLayoutDef::GetImmutableSamplerPtrFromBinding(const uint32_t binding) const {
const auto &bi_itr = binding_to_index_map_.find(binding);
if (bi_itr != binding_to_index_map_.end()) {
return bindings_[bi_itr->second].pImmutableSamplers;
}
return nullptr;
}
// Move to next valid binding having a non-zero binding count
uint32_t cvdescriptorset::DescriptorSetLayoutDef::GetNextValidBinding(const uint32_t binding) const {
auto it = non_empty_bindings_.upper_bound(binding);
assert(it != non_empty_bindings_.cend());
if (it != non_empty_bindings_.cend()) return *it;
return GetMaxBinding() + 1;
}
// For given index, return ptr to ImmutableSampler array
VkSampler const *cvdescriptorset::DescriptorSetLayoutDef::GetImmutableSamplerPtrFromIndex(const uint32_t index) const {
if (index < bindings_.size()) {
return bindings_[index].pImmutableSamplers;
}
return nullptr;
}
// If our layout is compatible with rh_ds_layout, return true,
// else return false and fill in error_msg will description of what causes incompatibility
bool cvdescriptorset::DescriptorSetLayout::IsCompatible(DescriptorSetLayout const *const rh_ds_layout,
std::string *error_msg) const {
// Trivial case
if (layout_ == rh_ds_layout->GetDescriptorSetLayout()) return true;
if (GetLayoutDef() == rh_ds_layout->GetLayoutDef()) return true;
bool detailed_compat_check =
GetLayoutDef()->IsCompatible(layout_, rh_ds_layout->GetDescriptorSetLayout(), rh_ds_layout->GetLayoutDef(), error_msg);
// The detailed check should never tell us mismatching DSL are compatible
assert(!detailed_compat_check);
return detailed_compat_check;
}
// Do a detailed compatibility check of this def (referenced by ds_layout), vs. the rhs (layout and def)
// Should only be called if trivial accept has failed, and in that context should return false.
// On mismatch, writes a human-readable description into *error_msg and returns false;
// the layout handles are passed in only so the message can name both objects.
bool cvdescriptorset::DescriptorSetLayoutDef::IsCompatible(VkDescriptorSetLayout ds_layout, VkDescriptorSetLayout rh_ds_layout,
                                                           DescriptorSetLayoutDef const *const rh_ds_layout_def,
                                                           std::string *error_msg) const {
    // Cheap reject: total descriptor counts must match before bindings are compared.
    if (descriptor_count_ != rh_ds_layout_def->descriptor_count_) {
        std::stringstream error_str;
        error_str << "DescriptorSetLayout " << ds_layout << " has " << descriptor_count_ << " descriptors, but DescriptorSetLayout "
                  << rh_ds_layout << ", which comes from pipelineLayout, has " << rh_ds_layout_def->descriptor_count_
                  << " descriptors.";
        *error_msg = error_str.str();
        return false;  // trivial fail case
    }
    // Descriptor counts match so need to go through bindings one-by-one
    //  and verify that type and stageFlags match
    // NOTE(review): only count/type/stageFlags are compared per binding; immutable
    // samplers are not checked here (see TODO below) -- confirm against the spec's
    // "Pipeline Layout Compatibility" rules.
    for (auto binding : bindings_) {
        // TODO : Do we also need to check immutable samplers?
        // VkDescriptorSetLayoutBinding *rh_binding;
        if (binding.descriptorCount != rh_ds_layout_def->GetDescriptorCountFromBinding(binding.binding)) {
            std::stringstream error_str;
            error_str << "Binding " << binding.binding << " for DescriptorSetLayout " << ds_layout << " has a descriptorCount of "
                      << binding.descriptorCount << " but binding " << binding.binding << " for DescriptorSetLayout "
                      << rh_ds_layout << ", which comes from pipelineLayout, has a descriptorCount of "
                      << rh_ds_layout_def->GetDescriptorCountFromBinding(binding.binding);
            *error_msg = error_str.str();
            return false;
        } else if (binding.descriptorType != rh_ds_layout_def->GetTypeFromBinding(binding.binding)) {
            std::stringstream error_str;
            error_str << "Binding " << binding.binding << " for DescriptorSetLayout " << ds_layout << " is type '"
                      << string_VkDescriptorType(binding.descriptorType) << "' but binding " << binding.binding
                      << " for DescriptorSetLayout " << rh_ds_layout << ", which comes from pipelineLayout, is type '"
                      << string_VkDescriptorType(rh_ds_layout_def->GetTypeFromBinding(binding.binding)) << "'";
            *error_msg = error_str.str();
            return false;
        } else if (binding.stageFlags != rh_ds_layout_def->GetStageFlagsFromBinding(binding.binding)) {
            std::stringstream error_str;
            error_str << "Binding " << binding.binding << " for DescriptorSetLayout " << ds_layout << " has stageFlags "
                      << binding.stageFlags << " but binding " << binding.binding << " for DescriptorSetLayout " << rh_ds_layout
                      << ", which comes from pipelineLayout, has stageFlags "
                      << rh_ds_layout_def->GetStageFlagsFromBinding(binding.binding);
            *error_msg = error_str.str();
            return false;
        }
    }
    return true;
}
// True when binding+1 exists and matches this binding's descriptor type, stage
// flags, immutable-sampler usage, and binding flags. Used to decide whether an
// update that oversteps one binding may legally roll over into the next.
bool cvdescriptorset::DescriptorSetLayoutDef::IsNextBindingConsistent(const uint32_t binding) const {
    if (!binding_to_index_map_.count(binding + 1)) return false;
    const auto cur_itr = binding_to_index_map_.find(binding);
    if (cur_itr == binding_to_index_map_.end()) return false;
    const auto next_itr = binding_to_index_map_.find(binding + 1);
    if (next_itr == binding_to_index_map_.end()) return false;
    const auto &cur = bindings_[cur_itr->second];
    const auto &next = bindings_[next_itr->second];
    // Immutable samplers only need to agree on presence, not on contents.
    const bool cur_immut = (cur.pImmutableSamplers != nullptr);
    const bool next_immut = (next.pImmutableSamplers != nullptr);
    return (cur.descriptorType == next.descriptorType) && (cur.stageFlags == next.stageFlags) &&
           (cur_immut == next_immut) && (binding_flags_[cur_itr->second] == binding_flags_[next_itr->second]);
}
// Starting at offset descriptor of given binding, parse over update_count
//  descriptor updates and verify that for any binding boundaries that are crossed, the next binding(s) are all consistent
//  Consistency means that their type, stage flags, and whether or not they use immutable samplers matches
//  If so, return true. If not, fill in error_msg and return false
// |type| is a human-readable operation name ("write update", "copy update", ...)
// used only in the error message; |set| likewise only names the set being updated.
bool cvdescriptorset::DescriptorSetLayoutDef::VerifyUpdateConsistency(uint32_t current_binding, uint32_t offset,
                                                                      uint32_t update_count, const char *type,
                                                                      const VkDescriptorSet set, std::string *error_msg) const {
    // Verify consecutive bindings match (if needed)
    auto orig_binding = current_binding;
    // Track count of descriptors in the current_bindings that are remaining to be updated
    auto binding_remaining = GetDescriptorCountFromBinding(current_binding);
    // First, it's legal to offset beyond your own binding so handle that case
    //  Really this is just searching for the binding in which the update begins and adjusting offset accordingly
    // NOTE: assumes the caller has already validated that offset + update_count does
    // not run past the end of the layout, so this walk terminates.
    while (offset >= binding_remaining) {
        // Advance to next binding, decrement offset by binding size
        offset -= binding_remaining;
        binding_remaining = GetDescriptorCountFromBinding(++current_binding);
    }
    binding_remaining -= offset;
    while (update_count > binding_remaining) {  // While our updates overstep current binding
        // Verify next consecutive binding matches type, stage flags & immutable sampler use
        // (current_binding is post-incremented: the check is against the binding we are leaving)
        if (!IsNextBindingConsistent(current_binding++)) {
            std::stringstream error_str;
            error_str << "Attempting " << type;
            if (IsPushDescriptor()) {
                error_str << " push descriptors";
            } else {
                error_str << " descriptor set " << set;
            }
            error_str << " binding #" << orig_binding << " with #" << update_count
                      << " descriptors being updated but this update oversteps the bounds of this binding and the next binding is "
                         "not consistent with current binding so this update is invalid.";
            *error_msg = error_str.str();
            return false;
        }
        // For sake of this check consider the bindings updated and grab count for next binding
        update_count -= binding_remaining;
        binding_remaining = GetDescriptorCountFromBinding(current_binding);
    }
    return true;
}
// The DescriptorSetLayout stores the per handle data for a descriptor set layout, and references the common defintion for the
// handle invariant portion
// GetCanonicalId() de-duplicates identical create infos so that trivially-equal
// layouts share one DescriptorSetLayoutDef.
cvdescriptorset::DescriptorSetLayout::DescriptorSetLayout(const VkDescriptorSetLayoutCreateInfo *p_create_info,
                                                          const VkDescriptorSetLayout layout)
    : layout_(layout), layout_destroyed_(false), layout_id_(GetCanonicalId(p_create_info)) {}
// Validate descriptor set layout create info
// Checks push-descriptor / descriptor-indexing extension enablement, per-binding
// restrictions (duplicate binding numbers, types illegal for push descriptors,
// inline-uniform-block count rules), VkDescriptorSetLayoutBindingFlagsCreateInfoEXT
// flag validity, and the push-descriptor total-count limit.
// Returns true when at least one validation error was logged ("skip" convention).
bool cvdescriptorset::DescriptorSetLayout::ValidateCreateInfo(
    const debug_report_data *report_data, const VkDescriptorSetLayoutCreateInfo *create_info, const bool push_descriptor_ext,
    const uint32_t max_push_descriptors, const bool descriptor_indexing_ext,
    const VkPhysicalDeviceDescriptorIndexingFeaturesEXT *descriptor_indexing_features,
    const VkPhysicalDeviceInlineUniformBlockFeaturesEXT *inline_uniform_block_features,
    const VkPhysicalDeviceInlineUniformBlockPropertiesEXT *inline_uniform_block_props) {
    bool skip = false;
    std::unordered_set<uint32_t> bindings;  // for duplicate-binding detection
    uint64_t total_descriptors = 0;         // 64-bit so large per-binding counts can't overflow the sum
    const auto *flags_create_info = lvl_find_in_chain<VkDescriptorSetLayoutBindingFlagsCreateInfoEXT>(create_info->pNext);
    const bool push_descriptor_set = !!(create_info->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
    if (push_descriptor_set && !push_descriptor_ext) {
        // Message typo ("Attemped") and trailing \n fixed for consistency with other messages.
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        kVUID_Core_DrawState_ExtensionNotEnabled,
                        "Attempted to use %s in %s but its required extension %s has not been enabled.",
                        "VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR", "VkDescriptorSetLayoutCreateInfo::flags",
                        VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME);
    }
    const bool update_after_bind_set = !!(create_info->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT);
    if (update_after_bind_set && !descriptor_indexing_ext) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        kVUID_Core_DrawState_ExtensionNotEnabled,
                        "Attempted to use %s in %s but its required extension %s has not been enabled.",
                        "VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT", "VkDescriptorSetLayoutCreateInfo::flags",
                        VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME);
    }
    // Dynamic buffers and inline uniform blocks may not appear in push-descriptor layouts.
    auto valid_type = [push_descriptor_set](const VkDescriptorType type) {
        return !push_descriptor_set ||
               ((type != VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) && (type != VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) &&
                (type != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT));
    };
    uint32_t max_binding = 0;  // needed below to validate VARIABLE_DESCRIPTOR_COUNT placement
    for (uint32_t i = 0; i < create_info->bindingCount; ++i) {
        const auto &binding_info = create_info->pBindings[i];
        max_binding = std::max(max_binding, binding_info.binding);
        if (!bindings.insert(binding_info.binding).second) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            "VUID-VkDescriptorSetLayoutCreateInfo-binding-00279",
                            "duplicated binding number in VkDescriptorSetLayoutBinding.");
        }
        if (!valid_type(binding_info.descriptorType)) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
                                ? "VUID-VkDescriptorSetLayoutCreateInfo-flags-02208"
                                : "VUID-VkDescriptorSetLayoutCreateInfo-flags-00280",
                            "invalid type %s, for push descriptors in VkDescriptorSetLayoutBinding entry %" PRIu32 ".",
                            string_VkDescriptorType(binding_info.descriptorType), i);
        }
        if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
            // For inline uniform blocks descriptorCount is a byte size, not a descriptor count.
            if ((binding_info.descriptorCount % 4) != 0) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                "VUID-VkDescriptorSetLayoutBinding-descriptorType-02209",
                                "descriptorCount =(%" PRIu32 ") must be a multiple of 4", binding_info.descriptorCount);
            }
            if (binding_info.descriptorCount > inline_uniform_block_props->maxInlineUniformBlockSize) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                "VUID-VkDescriptorSetLayoutBinding-descriptorType-02210",
                                "descriptorCount =(%" PRIu32 ") must be less than or equal to maxInlineUniformBlockSize",
                                binding_info.descriptorCount);
            }
        }
        total_descriptors += binding_info.descriptorCount;
    }
    if (flags_create_info) {
        if (flags_create_info->bindingCount != 0 && flags_create_info->bindingCount != create_info->bindingCount) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-bindingCount-03002",
                            "VkDescriptorSetLayoutCreateInfo::bindingCount (%d) != "
                            "VkDescriptorSetLayoutBindingFlagsCreateInfoEXT::bindingCount (%d)",
                            create_info->bindingCount, flags_create_info->bindingCount);
        }
        // Per-binding flag checks only make sense when the two arrays are parallel.
        if (flags_create_info->bindingCount == create_info->bindingCount) {
            for (uint32_t i = 0; i < create_info->bindingCount; ++i) {
                const auto &binding_info = create_info->pBindings[i];
                if (flags_create_info->pBindingFlags[i] & VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT) {
                    if (!update_after_bind_set) {
                        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                        "VUID-VkDescriptorSetLayoutCreateInfo-flags-03000",
                                        "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i);
                    }
                    // Each descriptor type requires its matching *UpdateAfterBind feature bit.
                    if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER &&
                        !descriptor_indexing_features->descriptorBindingUniformBufferUpdateAfterBind) {
                        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                        "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-"
                                        "descriptorBindingUniformBufferUpdateAfterBind-03005",
                                        "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i);
                    }
                    if ((binding_info.descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER ||
                         binding_info.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER ||
                         binding_info.descriptorType == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE) &&
                        !descriptor_indexing_features->descriptorBindingSampledImageUpdateAfterBind) {
                        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                        "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-"
                                        "descriptorBindingSampledImageUpdateAfterBind-03006",
                                        "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i);
                    }
                    if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE &&
                        !descriptor_indexing_features->descriptorBindingStorageImageUpdateAfterBind) {
                        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                        "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-"
                                        "descriptorBindingStorageImageUpdateAfterBind-03007",
                                        "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i);
                    }
                    if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER &&
                        !descriptor_indexing_features->descriptorBindingStorageBufferUpdateAfterBind) {
                        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                        "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-"
                                        "descriptorBindingStorageBufferUpdateAfterBind-03008",
                                        "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i);
                    }
                    if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER &&
                        !descriptor_indexing_features->descriptorBindingUniformTexelBufferUpdateAfterBind) {
                        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                        "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-"
                                        "descriptorBindingUniformTexelBufferUpdateAfterBind-03009",
                                        "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i);
                    }
                    if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER &&
                        !descriptor_indexing_features->descriptorBindingStorageTexelBufferUpdateAfterBind) {
                        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                        "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-"
                                        "descriptorBindingStorageTexelBufferUpdateAfterBind-03010",
                                        "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i);
                    }
                    // Input attachments and dynamic buffers may never be update-after-bind.
                    if ((binding_info.descriptorType == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT ||
                         binding_info.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
                         binding_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
                        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                        "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-None-03011",
                                        "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i);
                    }
                    if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT &&
                        !inline_uniform_block_features->descriptorBindingInlineUniformBlockUpdateAfterBind) {
                        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                        "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-"
                                        "descriptorBindingInlineUniformBlockUpdateAfterBind-02211",
                                        "Invalid flags (VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT) for "
                                        "VkDescriptorSetLayoutBinding entry %" PRIu32
                                        " with descriptorBindingInlineUniformBlockUpdateAfterBind not enabled",
                                        i);
                    }
                }
                if (flags_create_info->pBindingFlags[i] & VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT) {
                    if (!descriptor_indexing_features->descriptorBindingUpdateUnusedWhilePending) {
                        skip |= log_msg(
                            report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-descriptorBindingUpdateUnusedWhilePending-03012",
                            "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i);
                    }
                }
                if (flags_create_info->pBindingFlags[i] & VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT) {
                    if (!descriptor_indexing_features->descriptorBindingPartiallyBound) {
                        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                        "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-descriptorBindingPartiallyBound-03013",
                                        "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i);
                    }
                }
                if (flags_create_info->pBindingFlags[i] & VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT) {
                    // Variable-count is only legal on the highest-numbered binding.
                    if (binding_info.binding != max_binding) {
                        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                        "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-pBindingFlags-03004",
                                        "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i);
                    }
                    if (!descriptor_indexing_features->descriptorBindingVariableDescriptorCount) {
                        skip |= log_msg(
                            report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-descriptorBindingVariableDescriptorCount-03014",
                            "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i);
                    }
                    if ((binding_info.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
                         binding_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
                        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                        "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-pBindingFlags-03015",
                                        "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i);
                    }
                }
                // None of the indexing flags are compatible with push descriptors.
                if (push_descriptor_set &&
                    (flags_create_info->pBindingFlags[i] &
                     (VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT | VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT |
                      VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT))) {
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                    "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-flags-03003",
                                    "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i);
                }
            }
        }
    }
    if ((push_descriptor_set) && (total_descriptors > max_push_descriptors)) {
        const char *undefined = push_descriptor_ext ? "" : " -- undefined";
        skip |=
            log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                    "VUID-VkDescriptorSetLayoutCreateInfo-flags-00281",
                    "for push descriptor, total descriptor count in layout (%" PRIu64
                    ") must not be greater than VkPhysicalDevicePushDescriptorPropertiesKHR::maxPushDescriptors (%" PRIu32 "%s).",
                    total_descriptors, max_push_descriptors, undefined);
    }
    return skip;
}
// Scratch state for validating/recording a vkAllocateDescriptorSets call:
// zeroed per-type requirement counts plus one (initially null) layout slot per set.
cvdescriptorset::AllocateDescriptorSetsData::AllocateDescriptorSetsData(uint32_t count)
    : required_descriptors_by_type{}, layout_nodes(count, nullptr) {}
// Build the tracked state for an allocated descriptor set: one default-constructed
// Descriptor object per descriptor in the layout, in global-index order. Bindings
// with immutable samplers are considered already updated at creation time.
cvdescriptorset::DescriptorSet::DescriptorSet(const VkDescriptorSet set, const VkDescriptorPool pool,
                                              const std::shared_ptr<DescriptorSetLayout const> &layout, uint32_t variable_count,
                                              layer_data *dev_data)
    : some_update_(false),
      set_(set),
      pool_state_(nullptr),
      p_layout_(layout),
      device_data_(dev_data),
      limits_(GetPhysDevProperties(dev_data)->properties.limits),
      variable_count_(variable_count) {
    pool_state_ = GetDescriptorPoolState(dev_data, pool);
    // Foreach binding, create default descriptors of given type
    descriptors_.reserve(p_layout_->GetTotalDescriptorCount());
    for (uint32_t i = 0; i < p_layout_->GetBindingCount(); ++i) {
        auto type = p_layout_->GetTypeFromIndex(i);
        switch (type) {
            case VK_DESCRIPTOR_TYPE_SAMPLER: {
                auto immut_sampler = p_layout_->GetImmutableSamplerPtrFromIndex(i);
                for (uint32_t di = 0; di < p_layout_->GetDescriptorCountFromIndex(i); ++di) {
                    if (immut_sampler) {
                        descriptors_.emplace_back(new SamplerDescriptor(immut_sampler + di));
                        some_update_ = true;  // Immutable samplers are updated at creation
                    } else
                        descriptors_.emplace_back(new SamplerDescriptor(nullptr));
                }
                break;
            }
            case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: {
                auto immut = p_layout_->GetImmutableSamplerPtrFromIndex(i);
                for (uint32_t di = 0; di < p_layout_->GetDescriptorCountFromIndex(i); ++di) {
                    if (immut) {
                        descriptors_.emplace_back(new ImageSamplerDescriptor(immut + di));
                        some_update_ = true;  // Immutable samplers are updated at creation
                    } else
                        descriptors_.emplace_back(new ImageSamplerDescriptor(nullptr));
                }
                break;
            }
            // ImageDescriptors
            case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
            case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
            case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
                for (uint32_t di = 0; di < p_layout_->GetDescriptorCountFromIndex(i); ++di)
                    descriptors_.emplace_back(new ImageDescriptor(type));
                break;
            case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
            case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
                for (uint32_t di = 0; di < p_layout_->GetDescriptorCountFromIndex(i); ++di)
                    descriptors_.emplace_back(new TexelDescriptor(type));
                break;
            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
                for (uint32_t di = 0; di < p_layout_->GetDescriptorCountFromIndex(i); ++di)
                    descriptors_.emplace_back(new BufferDescriptor(type));
                break;
            case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
                // Note: for inline uniform blocks, descriptorCount is a byte count,
                // so one InlineUniformDescriptor is created per byte.
                for (uint32_t di = 0; di < p_layout_->GetDescriptorCountFromIndex(i); ++di)
                    descriptors_.emplace_back(new InlineUniformDescriptor(type));
                break;
            case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV:
                for (uint32_t di = 0; di < p_layout_->GetDescriptorCountFromIndex(i); ++di)
                    descriptors_.emplace_back(new AccelerationStructureDescriptor(type));
                break;
            default:
                assert(0);  // Bad descriptor type specified
                break;
        }
    }
}
// On destruction, invalidate any command buffers still bound to this set.
cvdescriptorset::DescriptorSet::~DescriptorSet() { InvalidateBoundCmdBuffers(); }
// Build a comma-separated list of the VkImageViewType names encoded in |req|'s
// view-type bits; returns "(none)" when no view-type bit is set.
static std::string StringDescriptorReqViewType(descriptor_req req) {
    std::string result;
    for (unsigned i = 0; i <= VK_IMAGE_VIEW_TYPE_END_RANGE; i++) {
        if (!(req & (1 << i))) continue;
        if (!result.empty()) {
            result += ", ";
        }
        result += string_VkImageViewType(VkImageViewType(i));
    }
    return result.empty() ? std::string("(none)") : result;
}
// Human-readable name of the numeric component type required by |req|
// (checked in the same precedence order as the requirement bits are tested elsewhere).
static char const *StringDescriptorReqComponentType(descriptor_req req) {
    if (req & DESCRIPTOR_REQ_COMPONENT_TYPE_SINT) {
        return "SINT";
    } else if (req & DESCRIPTOR_REQ_COMPONENT_TYPE_UINT) {
        return "UINT";
    } else if (req & DESCRIPTOR_REQ_COMPONENT_TYPE_FLOAT) {
        return "FLOAT";
    }
    return "(none)";
}
// Is this sets underlying layout compatible with passed in layout according to "Pipeline Layout Compatibility" in spec?
// Thin forwarding wrapper: delegates to DescriptorSetLayout::IsCompatible with this
// set's layout as the right-hand side; *error is filled on mismatch.
bool cvdescriptorset::DescriptorSet::IsCompatible(DescriptorSetLayout const *const layout, std::string *error) const {
    return layout->IsCompatible(p_layout_.get(), error);
}
// Map an image format onto the component-type requirement bits a shader must
// declare to consume it. The check order matters: signed/unsigned integer and
// depth-stencil formats are classified first, UNDEFINED yields no requirement,
// and every remaining format reads as float in the shader.
static unsigned DescriptorRequirementsBitsFromFormat(VkFormat fmt) {
    if (FormatIsSInt(fmt)) {
        return DESCRIPTOR_REQ_COMPONENT_TYPE_SINT;
    }
    if (FormatIsUInt(fmt)) {
        return DESCRIPTOR_REQ_COMPONENT_TYPE_UINT;
    }
    if (FormatIsDepthAndStencil(fmt)) {
        return DESCRIPTOR_REQ_COMPONENT_TYPE_FLOAT | DESCRIPTOR_REQ_COMPONENT_TYPE_UINT;
    }
    if (VK_FORMAT_UNDEFINED == fmt) {
        return 0;
    }
    // everything else -- UNORM/SNORM/FLOAT/USCALED/SSCALED is all float in the shader.
    return DESCRIPTOR_REQ_COMPONENT_TYPE_FLOAT;
}
// Validate that the state of this set is appropriate for the given bindings and dynamic_offsets at Draw time
//  This includes validating that all descriptors in the given bindings are updated,
//  that any update buffers are valid, and that any dynamic offsets are within the bounds of their buffers.
// Return true if state is acceptable, or false and write an error message into error string
// |bindings| maps binding number -> requirement bits derived from shader reflection;
// |dynamic_offsets| are the offsets passed at vkCmdBindDescriptorSets time.
bool cvdescriptorset::DescriptorSet::ValidateDrawState(const std::map<uint32_t, descriptor_req> &bindings,
                                                       const std::vector<uint32_t> &dynamic_offsets, GLOBAL_CB_NODE *cb_node,
                                                       const char *caller, std::string *error) const {
    for (auto binding_pair : bindings) {
        auto binding = binding_pair.first;
        if (!p_layout_->HasBinding(binding)) {
            std::stringstream error_str;
            error_str << "Attempting to validate DrawState for binding #" << binding
                      << " which is an invalid binding for this descriptor set.";
            *error = error_str.str();
            return false;
        }
        IndexRange index_range = p_layout_->GetGlobalIndexRangeFromBinding(binding);
        auto array_idx = 0;  // Track array idx if we're dealing with array descriptors
        if (IsVariableDescriptorCount(binding)) {
            // Only validate the first N descriptors if it uses variable_count
            index_range.end = index_range.start + GetVariableDescriptorCount();
        }
        for (uint32_t i = index_range.start; i < index_range.end; ++i, ++array_idx) {
            if ((p_layout_->GetDescriptorBindingFlagsFromBinding(binding) & VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT) ||
                descriptors_[i]->GetClass() == InlineUniform) {
                // Can't validate the descriptor because it may not have been updated,
                // or the view could have been destroyed
                continue;
            } else if (!descriptors_[i]->updated) {
                std::stringstream error_str;
                error_str << "Descriptor in binding #" << binding << " at global descriptor index " << i
                          << " is being used in draw but has not been updated.";
                *error = error_str.str();
                return false;
            } else {
                auto descriptor_class = descriptors_[i]->GetClass();
                if (descriptor_class == GeneralBuffer) {
                    // Verify that buffers are valid
                    auto buffer = static_cast<BufferDescriptor *>(descriptors_[i].get())->GetBuffer();
                    auto buffer_node = GetBufferState(device_data_, buffer);
                    if (!buffer_node) {
                        std::stringstream error_str;
                        error_str << "Descriptor in binding #" << binding << " at global descriptor index " << i
                                  << " references invalid buffer " << buffer << ".";
                        *error = error_str.str();
                        return false;
                    } else if (!buffer_node->sparse) {
                        // Non-sparse buffers must have valid backing memory.
                        for (auto mem_binding : buffer_node->GetBoundMemory()) {
                            if (!GetMemObjInfo(device_data_, mem_binding)) {
                                std::stringstream error_str;
                                error_str << "Descriptor in binding #" << binding << " at global descriptor index " << i
                                          << " uses buffer " << buffer << " that references invalid memory " << mem_binding << ".";
                                *error = error_str.str();
                                return false;
                            }
                        }
                    }
                    if (descriptors_[i]->IsDynamic()) {
                        // Validate that dynamic offsets are within the buffer
                        auto buffer_size = buffer_node->createInfo.size;
                        auto range = static_cast<BufferDescriptor *>(descriptors_[i].get())->GetRange();
                        auto desc_offset = static_cast<BufferDescriptor *>(descriptors_[i].get())->GetOffset();
                        auto dyn_offset = dynamic_offsets[GetDynamicOffsetIndexFromBinding(binding) + array_idx];
                        if (VK_WHOLE_SIZE == range) {
                            if ((dyn_offset + desc_offset) > buffer_size) {
                                std::stringstream error_str;
                                error_str << "Dynamic descriptor in binding #" << binding << " at global descriptor index " << i
                                          << " uses buffer " << buffer << " with update range of VK_WHOLE_SIZE has dynamic offset "
                                          << dyn_offset << " combined with offset " << desc_offset
                                          << " that oversteps the buffer size of " << buffer_size << ".";
                                *error = error_str.str();
                                return false;
                            }
                        } else {
                            if ((dyn_offset + desc_offset + range) > buffer_size) {
                                std::stringstream error_str;
                                error_str << "Dynamic descriptor in binding #" << binding << " at global descriptor index " << i
                                          << " uses buffer " << buffer << " with dynamic offset " << dyn_offset
                                          << " combined with offset " << desc_offset << " and range " << range
                                          << " that oversteps the buffer size of " << buffer_size << ".";
                                *error = error_str.str();
                                return false;
                            }
                        }
                    }
                } else if (descriptor_class == ImageSampler || descriptor_class == Image) {
                    VkImageView image_view;
                    VkImageLayout image_layout;
                    if (descriptor_class == ImageSampler) {
                        image_view = static_cast<ImageSamplerDescriptor *>(descriptors_[i].get())->GetImageView();
                        image_layout = static_cast<ImageSamplerDescriptor *>(descriptors_[i].get())->GetImageLayout();
                    } else {
                        image_view = static_cast<ImageDescriptor *>(descriptors_[i].get())->GetImageView();
                        image_layout = static_cast<ImageDescriptor *>(descriptors_[i].get())->GetImageLayout();
                    }
                    auto reqs = binding_pair.second;
                    auto image_view_state = GetImageViewState(device_data_, image_view);
                    if (nullptr == image_view_state) {
                        // Image view must have been destroyed since initial update. Could potentially flag the descriptor
                        //  as "invalid" (updated = false) at DestroyImageView() time and detect this error at bind time
                        std::stringstream error_str;
                        error_str << "Descriptor in binding #" << binding << " at global descriptor index " << i
                                  << " is using imageView " << image_view << " that has been destroyed.";
                        *error = error_str.str();
                        return false;
                    }
                    auto image_view_ci = image_view_state->create_info;
                    if ((reqs & DESCRIPTOR_REQ_ALL_VIEW_TYPE_BITS) && (~reqs & (1 << image_view_ci.viewType))) {
                        // bad view type
                        std::stringstream error_str;
                        error_str << "Descriptor in binding #" << binding << " at global descriptor index " << i
                                  << " requires an image view of type " << StringDescriptorReqViewType(reqs) << " but got "
                                  << string_VkImageViewType(image_view_ci.viewType) << ".";
                        *error = error_str.str();
                        return false;
                    }
                    auto format_bits = DescriptorRequirementsBitsFromFormat(image_view_ci.format);
                    if (!(reqs & format_bits)) {
                        // bad component type
                        std::stringstream error_str;
                        error_str << "Descriptor in binding #" << binding << " at global descriptor index " << i << " requires "
                                  << StringDescriptorReqComponentType(reqs) << " component type, but bound descriptor format is "
                                  << string_VkFormat(image_view_ci.format) << ".";
                        *error = error_str.str();
                        return false;
                    }
                    auto image_node = GetImageState(device_data_, image_view_ci.image);
                    assert(image_node);
                    // Verify Image Layout
                    // Copy first mip level into sub_layers and loop over each mip level to verify layout
                    VkImageSubresourceLayers sub_layers;
                    sub_layers.aspectMask = image_view_ci.subresourceRange.aspectMask;
                    sub_layers.baseArrayLayer = image_view_ci.subresourceRange.baseArrayLayer;
                    sub_layers.layerCount = image_view_ci.subresourceRange.layerCount;
                    bool hit_error = false;
                    // NOTE(review): loop bound compares cur_level against levelCount directly;
                    // when baseMipLevel > 0 this appears to skip trailing levels (and to iterate
                    // nothing for VK_REMAINING_MIP_LEVELS) -- likely should be
                    // cur_level < baseMipLevel + levelCount; confirm against the spec.
                    for (auto cur_level = image_view_ci.subresourceRange.baseMipLevel;
                         cur_level < image_view_ci.subresourceRange.levelCount; ++cur_level) {
                        sub_layers.mipLevel = cur_level;
                        // No "invalid layout" VUID required for this call, since the optimal_layout parameter is UNDEFINED.
                        VerifyImageLayout(device_data_, cb_node, image_node, sub_layers, image_layout, VK_IMAGE_LAYOUT_UNDEFINED,
                                          caller, kVUIDUndefined, "VUID-VkDescriptorImageInfo-imageLayout-00344", &hit_error);
                        if (hit_error) {
                            *error =
                                "Image layout specified at vkUpdateDescriptorSet* or vkCmdPushDescriptorSet* time "
                                "doesn't match actual image layout at time descriptor is used. See previous error callback for "
                                "specific details.";
                            return false;
                        }
                    }
                    // Verify Sample counts
                    if ((reqs & DESCRIPTOR_REQ_SINGLE_SAMPLE) && image_node->createInfo.samples != VK_SAMPLE_COUNT_1_BIT) {
                        std::stringstream error_str;
                        error_str << "Descriptor in binding #" << binding << " at global descriptor index " << i
                                  << " requires bound image to have VK_SAMPLE_COUNT_1_BIT but got "
                                  << string_VkSampleCountFlagBits(image_node->createInfo.samples) << ".";
                        *error = error_str.str();
                        return false;
                    }
                    if ((reqs & DESCRIPTOR_REQ_MULTI_SAMPLE) && image_node->createInfo.samples == VK_SAMPLE_COUNT_1_BIT) {
                        std::stringstream error_str;
                        error_str << "Descriptor in binding #" << binding << " at global descriptor index " << i
                                  << " requires bound image to have multiple samples, but got VK_SAMPLE_COUNT_1_BIT.";
                        *error = error_str.str();
                        return false;
                    }
                }
                if (descriptor_class == ImageSampler || descriptor_class == PlainSampler) {
                    // Verify Sampler still valid
                    VkSampler sampler;
                    if (descriptor_class == ImageSampler) {
                        sampler = static_cast<ImageSamplerDescriptor *>(descriptors_[i].get())->GetSampler();
                    } else {
                        sampler = static_cast<SamplerDescriptor *>(descriptors_[i].get())->GetSampler();
                    }
                    if (!ValidateSampler(sampler, device_data_)) {
                        std::stringstream error_str;
                        error_str << "Descriptor in binding #" << binding << " at global descriptor index " << i
                                  << " is using sampler " << sampler << " that has been destroyed.";
                        *error = error_str.str();
                        return false;
                    }
                }
            }
        }
    }
    return true;
}
// For the given bindings, collect the buffers and image views referenced by any
// updated *storage* descriptors into the passed-in sets. Returns the number of
// updated storage descriptors recorded (texel buffers whose buffer-view state
// has been destroyed are skipped and not counted).
uint32_t cvdescriptorset::DescriptorSet::GetStorageUpdates(const std::map<uint32_t, descriptor_req> &bindings,
                                                           std::unordered_set<VkBuffer> *buffer_set,
                                                           std::unordered_set<VkImageView> *image_set) const {
    auto num_updates = 0;
    for (const auto &binding_pair : bindings) {
        const auto binding = binding_pair.first;
        // If a binding doesn't exist, skip it
        if (!p_layout_->HasBinding(binding)) continue;
        const uint32_t start_idx = p_layout_->GetGlobalIndexRangeFromBinding(binding).start;
        // Only storage-class descriptors contribute; the class is uniform across the binding.
        if (!descriptors_[start_idx]->IsStorage()) continue;
        const auto d_class = descriptors_[start_idx]->descriptor_class;
        const uint32_t count = p_layout_->GetDescriptorCountFromBinding(binding);
        for (uint32_t i = 0; i < count; ++i) {
            const auto &desc = descriptors_[start_idx + i];
            if (!desc->updated) continue;
            if (Image == d_class) {
                image_set->insert(static_cast<ImageDescriptor *>(desc.get())->GetImageView());
                num_updates++;
            } else if (TexelBuffer == d_class) {
                // Resolve the buffer view to its underlying buffer, if it still exists.
                auto bufferview = static_cast<TexelDescriptor *>(desc.get())->GetBufferView();
                auto bv_state = GetBufferViewState(device_data_, bufferview);
                if (bv_state) {
                    buffer_set->insert(bv_state->create_info.buffer);
                    num_updates++;
                }
            } else if (GeneralBuffer == d_class) {
                buffer_set->insert(static_cast<BufferDescriptor *>(desc.get())->GetBuffer());
                num_updates++;
            }
        }
    }
    return num_updates;
}
// Set is being deleted or updated, so invalidate all bound cmd buffers.
// Every command buffer in cb_bindings recorded a reference to this set that is now stale; the set
// itself is passed as the offending object for any subsequent error report.
void cvdescriptorset::DescriptorSet::InvalidateBoundCmdBuffers() {
    core_validation::InvalidateCommandBuffers(device_data_, cb_bindings, {HandleToUint64(set_), kVulkanObjectTypeDescriptorSet});
}
// Loop through the write updates to do for a push descriptor set, ignoring dstSet
void cvdescriptorset::DescriptorSet::PerformPushDescriptorsUpdate(uint32_t write_count, const VkWriteDescriptorSet *p_wds) {
assert(IsPushDescriptor());
for (uint32_t i = 0; i < write_count; i++) {
PerformWriteUpdate(&p_wds[i]);
}
}
// Perform write update in given update struct.
// A VkWriteDescriptorSet whose descriptorCount exceeds the remaining capacity of dstBinding legally
// rolls over into subsequent bindings, with the array offset resetting to 0 at each rollover.
void cvdescriptorset::DescriptorSet::PerformWriteUpdate(const VkWriteDescriptorSet *update) {
    uint32_t remaining = update->descriptorCount;
    uint32_t current_binding = update->dstBinding;
    uint32_t array_offset = update->dstArrayElement;
    uint32_t update_index = 0;  // Index into the update's source arrays, monotonically increasing
    while (remaining) {
        // Consume at most one binding's worth of descriptors per pass
        const uint32_t chunk = std::min(remaining, GetDescriptorCountFromBinding(current_binding));
        const auto global_start = p_layout_->GetGlobalIndexRangeFromBinding(current_binding).start + array_offset;
        for (uint32_t di = 0; di < chunk; ++di, ++update_index) {
            descriptors_[global_start + di]->WriteUpdate(update, update_index);
        }
        remaining -= chunk;
        array_offset = 0;  // Rollover continues from element 0 of the next binding
        ++current_binding;
    }
    if (update->descriptorCount) some_update_ = true;
    // Bindings flagged update-after-bind / update-unused-while-pending don't invalidate bound command buffers
    if (!(p_layout_->GetDescriptorBindingFlagsFromBinding(update->dstBinding) &
          (VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT | VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT))) {
        InvalidateBoundCmdBuffers();
    }
}
// Validate Copy update
// Checks a single VkCopyDescriptorSet against both the destination (this) set and src_set:
// layout liveness, binding existence, in-use restrictions, array bounds, descriptor-type match,
// update-after-bind flag consistency between the two layouts/pools, and inline-uniform-block
// alignment rules. On failure, fills *error_code with the VUID string and *error_msg with a
// human-readable description, then returns false. Returns true when the copy is valid.
bool cvdescriptorset::DescriptorSet::ValidateCopyUpdate(const debug_report_data *report_data, const VkCopyDescriptorSet *update,
                                                        const DescriptorSet *src_set, const char *func_name,
                                                        std::string *error_code, std::string *error_msg) {
    // Verify dst layout still valid
    if (p_layout_->IsDestroyed()) {
        *error_code = "VUID-VkCopyDescriptorSet-dstSet-parameter";
        string_sprintf(error_msg,
                       "Cannot call %s to perform copy update on descriptor set dstSet 0x%" PRIxLEAST64
                       " created with destroyed VkDescriptorSetLayout 0x%" PRIxLEAST64,
                       func_name, HandleToUint64(set_), HandleToUint64(p_layout_->GetDescriptorSetLayout()));
        return false;
    }
    // Verify src layout still valid
    if (src_set->p_layout_->IsDestroyed()) {
        *error_code = "VUID-VkCopyDescriptorSet-srcSet-parameter";
        string_sprintf(error_msg,
                       "Cannot call %s to perform copy update of dstSet 0x%" PRIxLEAST64
                       " from descriptor set srcSet 0x%" PRIxLEAST64
                       " created with destroyed VkDescriptorSetLayout 0x%" PRIxLEAST64,
                       func_name, HandleToUint64(set_), HandleToUint64(src_set->set_),
                       HandleToUint64(src_set->p_layout_->GetDescriptorSetLayout()));
        return false;
    }
    // Both bindings named in the copy must exist in their respective layouts
    if (!p_layout_->HasBinding(update->dstBinding)) {
        *error_code = "VUID-VkCopyDescriptorSet-dstBinding-00347";
        std::stringstream error_str;
        error_str << "DescriptorSet " << set_ << " does not have copy update dest binding of " << update->dstBinding;
        *error_msg = error_str.str();
        return false;
    }
    if (!src_set->HasBinding(update->srcBinding)) {
        *error_code = "VUID-VkCopyDescriptorSet-srcBinding-00345";
        std::stringstream error_str;
        error_str << "DescriptorSet " << set_ << " does not have copy update src binding of " << update->srcBinding;
        *error_msg = error_str.str();
        return false;
    }
    // Verify idle ds — updating a set in use by a command buffer is only allowed when the binding
    // carries the update-after-bind or update-unused-while-pending flags
    if (in_use.load() &&
        !(p_layout_->GetDescriptorBindingFlagsFromBinding(update->dstBinding) &
          (VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT | VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT))) {
        // TODO : Re-using Free Idle error code, need copy update idle error code
        *error_code = "VUID-vkFreeDescriptorSets-pDescriptorSets-00309";
        std::stringstream error_str;
        error_str << "Cannot call " << func_name << " to perform copy update on descriptor set " << set_
                  << " that is in use by a command buffer";
        *error_msg = error_str.str();
        return false;
    }
    // src & dst set bindings are valid
    // Check bounds of src & dst (the copied range may not overrun either set's flattened descriptor array)
    auto src_start_idx = src_set->GetGlobalIndexRangeFromBinding(update->srcBinding).start + update->srcArrayElement;
    if ((src_start_idx + update->descriptorCount) > src_set->GetTotalDescriptorCount()) {
        // SRC update out of bounds
        *error_code = "VUID-VkCopyDescriptorSet-srcArrayElement-00346";
        std::stringstream error_str;
        error_str << "Attempting copy update from descriptorSet " << update->srcSet << " binding#" << update->srcBinding
                  << " with offset index of " << src_set->GetGlobalIndexRangeFromBinding(update->srcBinding).start
                  << " plus update array offset of " << update->srcArrayElement << " and update of " << update->descriptorCount
                  << " descriptors oversteps total number of descriptors in set: " << src_set->GetTotalDescriptorCount();
        *error_msg = error_str.str();
        return false;
    }
    auto dst_start_idx = p_layout_->GetGlobalIndexRangeFromBinding(update->dstBinding).start + update->dstArrayElement;
    if ((dst_start_idx + update->descriptorCount) > p_layout_->GetTotalDescriptorCount()) {
        // DST update out of bounds
        *error_code = "VUID-VkCopyDescriptorSet-dstArrayElement-00348";
        std::stringstream error_str;
        error_str << "Attempting copy update to descriptorSet " << set_ << " binding#" << update->dstBinding
                  << " with offset index of " << p_layout_->GetGlobalIndexRangeFromBinding(update->dstBinding).start
                  << " plus update array offset of " << update->dstArrayElement << " and update of " << update->descriptorCount
                  << " descriptors oversteps total number of descriptors in set: " << p_layout_->GetTotalDescriptorCount();
        *error_msg = error_str.str();
        return false;
    }
    // Check that types match
    // TODO : Base default error case going from here is "VUID-VkAcquireNextImageInfoKHR-semaphore-parameter"2ba which covers all
    // consistency issues, need more fine-grained error codes
    *error_code = "VUID-VkCopyDescriptorSet-srcSet-00349";
    auto src_type = src_set->GetTypeFromBinding(update->srcBinding);
    auto dst_type = p_layout_->GetTypeFromBinding(update->dstBinding);
    if (src_type != dst_type) {
        std::stringstream error_str;
        error_str << "Attempting copy update to descriptorSet " << set_ << " binding #" << update->dstBinding << " with type "
                  << string_VkDescriptorType(dst_type) << " from descriptorSet " << src_set->GetSet() << " binding #"
                  << update->srcBinding << " with type " << string_VkDescriptorType(src_type) << ". Types do not match";
        *error_msg = error_str.str();
        return false;
    }
    // Verify consistency of src & dst bindings if update crosses binding boundaries
    if ((!src_set->GetLayout()->VerifyUpdateConsistency(update->srcBinding, update->srcArrayElement, update->descriptorCount,
                                                        "copy update from", src_set->GetSet(), error_msg)) ||
        (!p_layout_->VerifyUpdateConsistency(update->dstBinding, update->dstArrayElement, update->descriptorCount, "copy update to",
                                             set_, error_msg))) {
        return false;
    }
    // The UPDATE_AFTER_BIND_POOL flag must agree between the two layouts, in both directions
    if ((src_set->GetLayout()->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT) &&
        !(GetLayout()->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT)) {
        *error_code = "VUID-VkCopyDescriptorSet-srcSet-01918";
        std::stringstream error_str;
        error_str << "If pname:srcSet's (" << update->srcSet
                  << ") layout was created with the "
                     "ename:VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT flag "
                     "set, then pname:dstSet's ("
                  << update->dstSet
                  << ") layout must: also have been created with the "
                     "ename:VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT flag set";
        *error_msg = error_str.str();
        return false;
    }
    if (!(src_set->GetLayout()->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT) &&
        (GetLayout()->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT)) {
        *error_code = "VUID-VkCopyDescriptorSet-srcSet-01919";
        std::stringstream error_str;
        error_str << "If pname:srcSet's (" << update->srcSet
                  << ") layout was created without the "
                     "ename:VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT flag "
                     "set, then pname:dstSet's ("
                  << update->dstSet
                  << ") layout must: also have been created without the "
                     "ename:VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT flag set";
        *error_msg = error_str.str();
        return false;
    }
    // Likewise the pools' UPDATE_AFTER_BIND flag must agree, in both directions
    if ((src_set->GetPoolState()->createInfo.flags & VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT) &&
        !(GetPoolState()->createInfo.flags & VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT)) {
        *error_code = "VUID-VkCopyDescriptorSet-srcSet-01920";
        std::stringstream error_str;
        error_str << "If the descriptor pool from which pname:srcSet (" << update->srcSet
                  << ") was allocated was created "
                     "with the ename:VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT flag "
                     "set, then the descriptor pool from which pname:dstSet ("
                  << update->dstSet
                  << ") was allocated must: "
                     "also have been created with the ename:VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT flag set";
        *error_msg = error_str.str();
        return false;
    }
    if (!(src_set->GetPoolState()->createInfo.flags & VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT) &&
        (GetPoolState()->createInfo.flags & VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT)) {
        *error_code = "VUID-VkCopyDescriptorSet-srcSet-01921";
        std::stringstream error_str;
        error_str << "If the descriptor pool from which pname:srcSet (" << update->srcSet
                  << ") was allocated was created "
                     "without the ename:VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT flag "
                     "set, then the descriptor pool from which pname:dstSet ("
                  << update->dstSet
                  << ") was allocated must: "
                     "also have been created without the ename:VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT flag set";
        *error_msg = error_str.str();
        return false;
    }
    // Inline uniform blocks: offsets and counts are byte-granular and must be 4-byte aligned
    if (src_type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
        if ((update->srcArrayElement % 4) != 0) {
            *error_code = "VUID-VkCopyDescriptorSet-srcBinding-02223";
            std::stringstream error_str;
            error_str << "Attempting copy update to VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT binding with "
                      << "srcArrayElement " << update->srcArrayElement << " not a multiple of 4";
            *error_msg = error_str.str();
            return false;
        }
        if ((update->dstArrayElement % 4) != 0) {
            *error_code = "VUID-VkCopyDescriptorSet-dstBinding-02224";
            std::stringstream error_str;
            error_str << "Attempting copy update to VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT binding with "
                      << "dstArrayElement " << update->dstArrayElement << " not a multiple of 4";
            *error_msg = error_str.str();
            return false;
        }
        if ((update->descriptorCount % 4) != 0) {
            *error_code = "VUID-VkCopyDescriptorSet-srcBinding-02225";
            std::stringstream error_str;
            error_str << "Attempting copy update to VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT binding with "
                      << "descriptorCount " << update->descriptorCount << " not a multiple of 4";
            *error_msg = error_str.str();
            return false;
        }
    }
    // Update parameters all look good and descriptor updated so verify update contents
    if (!VerifyCopyUpdateContents(update, src_set, src_type, src_start_idx, func_name, error_code, error_msg)) return false;
    // All checks passed so update is good
    return true;
}
// Perform Copy update
// Copies descriptorCount descriptors from src_set into this set. A source descriptor that was never
// written leaves the corresponding destination marked not-updated.
void cvdescriptorset::DescriptorSet::PerformCopyUpdate(const VkCopyDescriptorSet *update, const DescriptorSet *src_set) {
    const auto src_base = src_set->GetGlobalIndexRangeFromBinding(update->srcBinding).start + update->srcArrayElement;
    const auto dst_base = p_layout_->GetGlobalIndexRangeFromBinding(update->dstBinding).start + update->dstArrayElement;
    // Validation has already passed, so just perform the per-descriptor copies
    for (uint32_t di = 0; di < update->descriptorCount; ++di) {
        auto *src = src_set->descriptors_[src_base + di].get();
        auto *dst = descriptors_[dst_base + di].get();
        if (!src->updated) {
            dst->updated = false;
            continue;
        }
        dst->CopyUpdate(src);
        some_update_ = true;
    }
    // Bindings flagged update-after-bind / update-unused-while-pending don't invalidate bound command buffers
    if (!(p_layout_->GetDescriptorBindingFlagsFromBinding(update->dstBinding) &
          (VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT | VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT))) {
        InvalidateBoundCmdBuffers();
    }
}
// Update the drawing state for the affected descriptors.
// Cross-links cb_node with this set (and its pool), then lets every descriptor in each required
// binding's global range record its own object bindings and expected image layouts.
// Setting layouts here will mask subsequent layout-mismatch errors.
// TODO: Modify the UpdateDrawState virtual functions to *only* set initial layout and not change layouts
// Prereq: This should be called for a set that has been confirmed to be active for the given cb_node,
// meaning it's going to be used in a draw by the given cb_node.
void cvdescriptorset::DescriptorSet::UpdateDrawState(GLOBAL_CB_NODE *cb_node,
                                                     const std::map<uint32_t, descriptor_req> &binding_req_map) {
    // Bind the command buffer to this set, the set to the command buffer...
    cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert({HandleToUint64(set_), kVulkanObjectTypeDescriptorSet});
    // ...and do the same for the set's pool
    pool_state_->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert({HandleToUint64(pool_state_->pool), kVulkanObjectTypeDescriptorPool});
    // For the active slots, walk each required binding and bind that binding's resources
    for (const auto &binding_req_pair : binding_req_map) {
        const auto range = p_layout_->GetGlobalIndexRangeFromBinding(binding_req_pair.first);
        for (uint32_t i = range.start; i < range.end; ++i) {
            descriptors_[i]->UpdateDrawState(device_data_, cb_node);
        }
    }
}
// Track one binding requirement: a binding not seen before for this tracker is forwarded to out_req
// and remembered, so repeated draws skip re-validating it.
void cvdescriptorset::DescriptorSet::FilterAndTrackOneBindingReq(const BindingReqMap::value_type &binding_req_pair,
                                                                 const BindingReqMap &in_req, BindingReqMap *out_req,
                                                                 TrackedBindings *bindings) {
    assert(out_req);
    assert(bindings);
    // A single insert() both tests membership and records the binding; its bool result tells us
    // whether the binding is newly tracked (one hash lookup instead of find/compare/insert)
    const bool newly_tracked = bindings->insert(binding_req_pair.first).second;
    if (newly_tracked) {
        out_req->emplace(binding_req_pair);
    }
}
// Limit-aware variant: once 'limit' bindings of this category are tracked, every binding of the
// category is already covered, so there is nothing left to filter.
void cvdescriptorset::DescriptorSet::FilterAndTrackOneBindingReq(const BindingReqMap::value_type &binding_req_pair,
                                                                 const BindingReqMap &in_req, BindingReqMap *out_req,
                                                                 TrackedBindings *bindings, uint32_t limit) {
    if (bindings->size() >= limit) return;
    FilterAndTrackOneBindingReq(binding_req_pair, in_req, out_req, bindings);
}
// Filter the requested bindings down to the ones not yet validated for cb_state, tracking them as used.
void cvdescriptorset::DescriptorSet::FilterAndTrackBindingReqs(GLOBAL_CB_NODE *cb_state, const BindingReqMap &in_req,
                                                               BindingReqMap *out_req) {
    TrackedBindings &bound = cached_validation_[cb_state].command_binding_and_usage;
    // Fast path: every binding is already tracked for this command buffer, so out_req stays empty
    if (bound.size() == GetBindingCount()) {
        return;
    }
    for (const auto &binding_req_pair : in_req) {
        // Skip bindings this layout doesn't contain; the tracker filters out already-bound ones
        if (!p_layout_->HasBinding(binding_req_pair.first)) continue;
        FilterAndTrackOneBindingReq(binding_req_pair, in_req, out_req, &bound);
    }
}
// Pipeline-aware filter: decide per descriptor type which bindings still need validation for this
// command buffer + pipeline pair, caching results so unchanged bindings are skipped on later draws.
void cvdescriptorset::DescriptorSet::FilterAndTrackBindingReqs(GLOBAL_CB_NODE *cb_state, PIPELINE_STATE *pipeline,
                                                               const BindingReqMap &in_req, BindingReqMap *out_req) {
    auto &validated = cached_validation_[cb_state];
    // Per-pipeline image/sampler "version" cache (keyed by binding)
    auto &image_sample_val = validated.image_samplers[pipeline];
    auto *const dynamic_buffers = &validated.dynamic_buffers;
    auto *const non_dynamic_buffers = &validated.non_dynamic_buffers;
    // Counts of each binding category in the layout, used as tracker limits below
    const auto &stats = p_layout_->GetBindingTypeStats();
    for (const auto &binding_req_pair : in_req) {
        auto binding = binding_req_pair.first;
        VkDescriptorSetLayoutBinding const *layout_binding = p_layout_->GetDescriptorSetLayoutBindingPtrFromBinding(binding);
        if (!layout_binding) {
            continue;
        }
        // Caching criteria differs per type.
        // If image layouts have changed, the image descriptors need to be re-validated against them.
        if ((layout_binding->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
            (layout_binding->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
            FilterAndTrackOneBindingReq(binding_req_pair, in_req, out_req, dynamic_buffers, stats.dynamic_buffer_count);
        } else if ((layout_binding->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) ||
                   (layout_binding->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)) {
            FilterAndTrackOneBindingReq(binding_req_pair, in_req, out_req, non_dynamic_buffers, stats.non_dynamic_buffer_count);
        } else {
            // This is rather crude, as the changed layouts may not impact the bound descriptors,
            // but the simple "versioning" is a simple "dirt" test.
            auto &version = image_sample_val[binding]; // Take advantage of default constructor zero-initializing new entries
            if (version != cb_state->image_layout_change_count) {
                version = cb_state->image_layout_change_count;
                out_req->emplace(binding_req_pair);
            }
        }
    }
}
// A SamplerDescriptor wraps either a mutable sampler (written later via updates) or an immutable
// sampler baked into the layout, which counts as updated from construction.
cvdescriptorset::SamplerDescriptor::SamplerDescriptor(const VkSampler *immut) : sampler_(VK_NULL_HANDLE), immutable_(false) {
    descriptor_class = PlainSampler;
    updated = false;
    if (immut) {
        sampler_ = *immut;
        immutable_ = true;
        updated = true;
    }
}
// Validate given sampler. Currently this only checks to make sure it exists in the samplerMap
bool cvdescriptorset::ValidateSampler(const VkSampler sampler, const layer_data *dev_data) {
    return nullptr != GetSamplerState(dev_data, sampler);
}
// Validate an image descriptor update: the view must exist, its layout must be compatible with the
// view's aspect mask and image format, and the image's usage bits must suit the descriptor type.
// On failure fills *error_code (VUID) and *error_msg, and returns false.
bool cvdescriptorset::ValidateImageUpdate(VkImageView image_view, VkImageLayout image_layout, VkDescriptorType type,
                                          const layer_data *dev_data, const char *func_name, std::string *error_code,
                                          std::string *error_msg) {
    *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00326";
    auto iv_state = GetImageViewState(dev_data, image_view);
    if (!iv_state) {
        std::stringstream error_str;
        error_str << "Invalid VkImageView: " << image_view;
        *error_msg = error_str.str();
        return false;
    }
    // Note that when an imageview is created, we validated that memory is bound so no need to re-check here
    // Validate that imageLayout is compatible with aspect_mask and image format
    // and validate that image usage bits are correct for given usage
    VkImageAspectFlags aspect_mask = iv_state->create_info.subresourceRange.aspectMask;
    VkImage image = iv_state->create_info.image;
    VkFormat format = VK_FORMAT_MAX_ENUM;  // Sentinel: stays MAX_ENUM if the image state can't be found
    VkImageUsageFlags usage = 0;
    auto image_node = GetImageState(dev_data, image);
    if (image_node) {
        format = image_node->createInfo.format;
        usage = image_node->createInfo.usage;
        // Validate that memory is bound to image
        // TODO: This should have its own valid usage id apart from 2524 which is from CreateImageView case. The only
        // way the error here occurs is if memory bound to a created imageView has been freed.
        if (ValidateMemoryIsBoundToImage(dev_data, image_node, func_name, "VUID-VkImageViewCreateInfo-image-01020")) {
            *error_code = "VUID-VkImageViewCreateInfo-image-01020";
            *error_msg = "No memory bound to image.";
            return false;
        }
        // KHR_maintenance1 allows rendering into 2D or 2DArray views which slice a 3D image,
        // but not binding them to descriptor sets.
        if (image_node->createInfo.imageType == VK_IMAGE_TYPE_3D &&
            (iv_state->create_info.viewType == VK_IMAGE_VIEW_TYPE_2D ||
             iv_state->create_info.viewType == VK_IMAGE_VIEW_TYPE_2D_ARRAY)) {
            *error_code = "VUID-VkDescriptorImageInfo-imageView-00343";
            *error_msg = "ImageView must not be a 2D or 2DArray view of a 3D image";
            return false;
        }
    }
    // First validate that format and layout are compatible
    if (format == VK_FORMAT_MAX_ENUM) {
        std::stringstream error_str;
        error_str << "Invalid image (" << image << ") in imageView (" << image_view << ").";
        *error_msg = error_str.str();
        return false;
    }
    // TODO : The various image aspect and format checks here are based on general spec language in 11.5 Image Views section under
    // vkCreateImageView(). What's the best way to create unique id for these cases?
    bool ds = FormatIsDepthOrStencil(format);
    switch (image_layout) {
        case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
            // Only Color bit must be set
            if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) {
                std::stringstream error_str;
                error_str
                    << "ImageView (" << image_view
                    << ") uses layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL but does not have VK_IMAGE_ASPECT_COLOR_BIT set.";
                *error_msg = error_str.str();
                return false;
            }
            // format must NOT be DS
            if (ds) {
                std::stringstream error_str;
                error_str << "ImageView (" << image_view
                          << ") uses layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL but the image format is "
                          << string_VkFormat(format) << " which is not a color format.";
                *error_msg = error_str.str();
                return false;
            }
            break;
        case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
        case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
            // Depth or stencil bit must be set, but both must NOT be set
            if (aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) {
                if (aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) {
                    // both must NOT be set
                    std::stringstream error_str;
                    error_str << "ImageView (" << image_view << ") has both STENCIL and DEPTH aspects set";
                    *error_msg = error_str.str();
                    return false;
                }
            } else if (!(aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
                // Neither were set
                std::stringstream error_str;
                error_str << "ImageView (" << image_view << ") has layout " << string_VkImageLayout(image_layout)
                          << " but does not have STENCIL or DEPTH aspects set";
                *error_msg = error_str.str();
                return false;
            }
            // format must be DS
            if (!ds) {
                std::stringstream error_str;
                error_str << "ImageView (" << image_view << ") has layout " << string_VkImageLayout(image_layout)
                          << " but the image format is " << string_VkFormat(format) << " which is not a depth/stencil format.";
                *error_msg = error_str.str();
                return false;
            }
            break;
        default:
            // For other layouts if the source is depth/stencil image, both aspect bits must not be set
            if (ds) {
                if (aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) {
                    if (aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) {
                        // both must NOT be set
                        std::stringstream error_str;
                        error_str << "ImageView (" << image_view << ") has layout " << string_VkImageLayout(image_layout)
                                  << " and is using depth/stencil image of format " << string_VkFormat(format)
                                  << " but it has both STENCIL and DEPTH aspects set, which is illegal. When using a depth/stencil "
                                     "image in a descriptor set, please only set either VK_IMAGE_ASPECT_DEPTH_BIT or "
                                     "VK_IMAGE_ASPECT_STENCIL_BIT depending on whether it will be used for depth reads or stencil "
                                     "reads respectively.";
                        *error_msg = error_str.str();
                        return false;
                    }
                }
            }
            break;
    }
    // Now validate that usage flags are correctly set for given type of update
    // As we're switching per-type, if any type has specific layout requirements, check those here as well
    // TODO : The various image usage bit requirements are in general spec language for VkImageUsageFlags bit block in 11.3 Images
    // under vkCreateImage()
    // TODO : Need to also validate case "VUID-VkWriteDescriptorSet-descriptorType-00336" where STORAGE_IMAGE & INPUT_ATTACH types
    // must have been created with identify swizzle
    std::string error_usage_bit;
    switch (type) {
        case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
        case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: {
            if (!(usage & VK_IMAGE_USAGE_SAMPLED_BIT)) {
                error_usage_bit = "VK_IMAGE_USAGE_SAMPLED_BIT";
            }
            break;
        }
        case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
            // image_node is non-null here: a null image_node leaves format == VK_FORMAT_MAX_ENUM,
            // which returned false above
            if (!(usage & VK_IMAGE_USAGE_STORAGE_BIT)) {
                error_usage_bit = "VK_IMAGE_USAGE_STORAGE_BIT";
            } else if (VK_IMAGE_LAYOUT_GENERAL != image_layout) {
                std::stringstream error_str;
                // TODO : Need to create custom enum error codes for these cases
                if (image_node->shared_presentable) {
                    if (VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR != image_layout) {
                        error_str << "ImageView (" << image_view
                                  << ") of VK_DESCRIPTOR_TYPE_STORAGE_IMAGE type with a front-buffered image is being updated with "
                                     "layout "
                                  << string_VkImageLayout(image_layout)
                                  << " but according to spec section 13.1 Descriptor Types, 'Front-buffered images that report "
                                     "support for VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT must be in the "
                                     "VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR layout.'";
                        *error_msg = error_str.str();
                        return false;
                    }
                } else if (VK_IMAGE_LAYOUT_GENERAL != image_layout) {  // NOTE(review): always true here; the enclosing branch already established image_layout != GENERAL
                    error_str << "ImageView (" << image_view
                              << ") of VK_DESCRIPTOR_TYPE_STORAGE_IMAGE type is being updated with layout "
                              << string_VkImageLayout(image_layout)
                              << " but according to spec section 13.1 Descriptor Types, 'Load and store operations on storage "
                                 "images can only be done on images in VK_IMAGE_LAYOUT_GENERAL layout.'";
                    *error_msg = error_str.str();
                    return false;
                }
            }
            break;
        }
        case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: {
            if (!(usage & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)) {
                error_usage_bit = "VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT";
            }
            break;
        }
        default:
            break;
    }
    if (!error_usage_bit.empty()) {
        // NOTE(review): 'usage' is streamed in decimal despite the "0x" prefix — consider std::hex
        std::stringstream error_str;
        error_str << "ImageView (" << image_view << ") with usage mask 0x" << usage
                  << " being used for a descriptor update of type " << string_VkDescriptorType(type) << " does not have "
                  << error_usage_bit << " set.";
        *error_msg = error_str.str();
        return false;
    }
    return true;
}
void cvdescriptorset::SamplerDescriptor::WriteUpdate(const VkWriteDescriptorSet *update, const uint32_t index) {
    // Immutable samplers come from the layout and are never overwritten by updates
    if (!immutable_) sampler_ = update->pImageInfo[index].sampler;
    updated = true;
}
void cvdescriptorset::SamplerDescriptor::CopyUpdate(const Descriptor *src) {
    updated = true;
    // Immutable samplers come from the layout and are never overwritten by updates
    if (!immutable_) {
        sampler_ = static_cast<const SamplerDescriptor *>(src)->sampler_;
    }
}
void cvdescriptorset::SamplerDescriptor::UpdateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
    if (immutable_) return;  // Immutable samplers are owned by the layout; no per-draw binding to record
    if (auto sampler_state = GetSamplerState(dev_data, sampler_)) {
        core_validation::AddCommandBufferBindingSampler(cb_node, sampler_state);
    }
}
// Combined image+sampler descriptor; the sampler half may be immutable (supplied by the layout).
cvdescriptorset::ImageSamplerDescriptor::ImageSamplerDescriptor(const VkSampler *immut)
    : sampler_(VK_NULL_HANDLE), immutable_(false), image_view_(VK_NULL_HANDLE), image_layout_(VK_IMAGE_LAYOUT_UNDEFINED) {
    descriptor_class = ImageSampler;
    updated = false;  // Even with an immutable sampler, the image half still needs an update
    if (immut) {
        sampler_ = *immut;
        immutable_ = true;
    }
}
void cvdescriptorset::ImageSamplerDescriptor::WriteUpdate(const VkWriteDescriptorSet *update, const uint32_t index) {
updated = true;
const auto &image_info = update->pImageInfo[index];
if (!immutable_) {
sampler_ = image_info.sampler;
}
image_view_ = image_info.imageView;
image_layout_ = image_info.imageLayout;
}
void cvdescriptorset::ImageSamplerDescriptor::CopyUpdate(const Descriptor *src) {
    const auto *img_samp_src = static_cast<const ImageSamplerDescriptor *>(src);
    if (!immutable_) {  // Immutable samplers are never overwritten
        sampler_ = img_samp_src->sampler_;
    }
    image_view_ = img_samp_src->image_view_;
    image_layout_ = img_samp_src->image_layout_;
    updated = true;
}
void cvdescriptorset::ImageSamplerDescriptor::UpdateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
    // First add binding for any non-immutable sampler
    if (!immutable_) {
        if (auto sampler_state = GetSamplerState(dev_data, sampler_)) {
            core_validation::AddCommandBufferBindingSampler(cb_node, sampler_state);
        }
    }
    // Then bind the image view and record the layout it is expected to be in
    if (auto iv_state = GetImageViewState(dev_data, image_view_)) {
        core_validation::AddCommandBufferBindingImageView(dev_data, cb_node, iv_state);
    }
    SetImageViewLayout(dev_data, cb_node, image_view_, image_layout_);
}
// Plain image descriptor (sampled or storage); storage-ness is derived from the descriptor type.
cvdescriptorset::ImageDescriptor::ImageDescriptor(const VkDescriptorType type)
    : storage_(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE == type), image_view_(VK_NULL_HANDLE), image_layout_(VK_IMAGE_LAYOUT_UNDEFINED) {
    updated = false;
    descriptor_class = Image;
}
void cvdescriptorset::ImageDescriptor::WriteUpdate(const VkWriteDescriptorSet *update, const uint32_t index) {
updated = true;
const auto &image_info = update->pImageInfo[index];
image_view_ = image_info.imageView;
image_layout_ = image_info.imageLayout;
}
void cvdescriptorset::ImageDescriptor::CopyUpdate(const Descriptor *src) {
    const auto *image_src = static_cast<const ImageDescriptor *>(src);
    image_view_ = image_src->image_view_;
    image_layout_ = image_src->image_layout_;
    updated = true;
}
void cvdescriptorset::ImageDescriptor::UpdateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
    // Bind the image view to the command buffer and record the layout it is expected to be in
    if (auto iv_state = GetImageViewState(dev_data, image_view_)) {
        core_validation::AddCommandBufferBindingImageView(dev_data, cb_node, iv_state);
    }
    SetImageViewLayout(dev_data, cb_node, image_view_, image_layout_);
}
// Buffer descriptor; dynamic-ness and storage-ness are derived from the descriptor type.
cvdescriptorset::BufferDescriptor::BufferDescriptor(const VkDescriptorType type)
    : storage_(false), dynamic_(false), buffer_(VK_NULL_HANDLE), offset_(0), range_(0) {
    updated = false;
    descriptor_class = GeneralBuffer;
    switch (type) {
        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
            dynamic_ = true;
            break;
        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
            storage_ = true;
            break;
        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
            dynamic_ = true;
            storage_ = true;
            break;
        default:
            break;  // e.g. VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: neither dynamic nor storage
    }
}
void cvdescriptorset::BufferDescriptor::WriteUpdate(const VkWriteDescriptorSet *update, const uint32_t index) {
updated = true;
const auto &buffer_info = update->pBufferInfo[index];
buffer_ = buffer_info.buffer;
offset_ = buffer_info.offset;
range_ = buffer_info.range;
}
void cvdescriptorset::BufferDescriptor::CopyUpdate(const Descriptor *src) {
    const auto *buff_src = static_cast<const BufferDescriptor *>(src);
    buffer_ = buff_src->buffer_;
    offset_ = buff_src->offset_;
    range_ = buff_src->range_;
    updated = true;
}
void cvdescriptorset::BufferDescriptor::UpdateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
    // Bind the backing buffer (if it still exists) to the command buffer
    if (auto buffer_node = GetBufferState(dev_data, buffer_)) {
        core_validation::AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_node);
    }
}
// Texel-buffer descriptor (uniform or storage texel buffer); storage-ness derives from the type.
cvdescriptorset::TexelDescriptor::TexelDescriptor(const VkDescriptorType type)
    : buffer_view_(VK_NULL_HANDLE), storage_(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER == type) {
    updated = false;
    descriptor_class = TexelBuffer;
}
void cvdescriptorset::TexelDescriptor::WriteUpdate(const VkWriteDescriptorSet *update, const uint32_t index) {
    buffer_view_ = update->pTexelBufferView[index];
    updated = true;
}
void cvdescriptorset::TexelDescriptor::CopyUpdate(const Descriptor *src) {
    buffer_view_ = static_cast<const TexelDescriptor *>(src)->buffer_view_;
    updated = true;
}
void cvdescriptorset::TexelDescriptor::UpdateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
    // Bind the buffer view (if it still exists) to the command buffer
    if (auto bv_state = GetBufferViewState(dev_data, buffer_view_)) {
        core_validation::AddCommandBufferBindingBufferView(dev_data, cb_node, bv_state);
    }
}
// This is a helper function that iterates over a set of Write and Copy updates, pulls the DescriptorSet* for updated
// sets, and then calls their respective Validate[Write|Copy]Update functions.
// If the update hits an issue for which the callback returns "true", meaning that the call down the chain should
// be skipped, then true is returned.
// If there is no issue with the update, then false is returned.
bool cvdescriptorset::ValidateUpdateDescriptorSets(const debug_report_data *report_data, const layer_data *dev_data,
                                                   uint32_t write_count, const VkWriteDescriptorSet *p_wds, uint32_t copy_count,
                                                   const VkCopyDescriptorSet *p_cds, const char *func_name) {
    bool skip = false;
    // Validate Write updates: each dstSet must be a known (allocated) set and pass per-set validation
    for (uint32_t i = 0; i < write_count; i++) {
        auto dest_set = p_wds[i].dstSet;
        auto set_node = core_validation::GetSetNode(dev_data, dest_set);
        if (!set_node) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            HandleToUint64(dest_set), kVUID_Core_DrawState_InvalidDescriptorSet,
                            "Cannot call %s on descriptor set 0x%" PRIxLEAST64 " that has not been allocated.", func_name,
                            HandleToUint64(dest_set));
        } else {
            std::string error_code;
            std::string error_str;
            if (!set_node->ValidateWriteUpdate(report_data, &p_wds[i], func_name, &error_code, &error_str)) {
                skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                HandleToUint64(dest_set), error_code,
                                "%s failed write update validation for Descriptor Set 0x%" PRIx64 " with error: %s.", func_name,
                                HandleToUint64(dest_set), error_str.c_str());
            }
        }
    }
    // Now validate copy updates
    for (uint32_t i = 0; i < copy_count; ++i) {
        auto dst_set = p_cds[i].dstSet;
        auto src_set = p_cds[i].srcSet;
        auto src_node = core_validation::GetSetNode(dev_data, src_set);
        auto dst_node = core_validation::GetSetNode(dev_data, dst_set);
        // Object_tracker verifies that src & dest descriptor set are valid
        assert(src_node);
        assert(dst_node);
        std::string error_code;
        std::string error_str;
        if (!dst_node->ValidateCopyUpdate(report_data, &p_cds[i], src_node, func_name, &error_code, &error_str)) {
            skip |=
                log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        HandleToUint64(dst_set), error_code,
                        "%s failed copy update from Descriptor Set 0x%" PRIx64 " to Descriptor Set 0x%" PRIx64 " with error: %s.",
                        func_name, HandleToUint64(src_set), HandleToUint64(dst_set), error_str.c_str());
        }
    }
    return skip;
}
// This is a helper function that iterates over a set of Write and Copy updates, pulls the DescriptorSet* for updated
// sets, and then calls their respective Perform[Write|Copy]Update functions.
// Prerequisite : ValidateUpdateDescriptorSets() should be called and return "false" prior to calling PerformUpdateDescriptorSets()
// with the same set of updates.
// This is split from the validate code to allow validation prior to calling down the chain, and then update after
// calling down the chain.
void cvdescriptorset::PerformUpdateDescriptorSets(const layer_data *dev_data, uint32_t write_count,
const VkWriteDescriptorSet *p_wds, uint32_t copy_count,
const VkCopyDescriptorSet *p_cds) {
// Write updates first
uint32_t i = 0;
for (i = 0; i < write_count; ++i) {
auto dest_set = p_wds[i].dstSet;
auto set_node = core_validation::GetSetNode(dev_data, dest_set);
if (set_node) {
set_node->PerformWriteUpdate(&p_wds[i]);
}
}
// Now copy updates
for (i = 0; i < copy_count; ++i) {
auto dst_set = p_cds[i].dstSet;
auto src_set = p_cds[i].srcSet;
auto src_node = core_validation::GetSetNode(dev_data, src_set);
auto dst_node = core_validation::GetSetNode(dev_data, dst_set);
if (src_node && dst_node) {
dst_node->PerformCopyUpdate(&p_cds[i], src_node);
}
}
}
// Decode a descriptor-update-template plus its raw data blob (pData) into an equivalent array of
// VkWriteDescriptorSet structs (desc_writes), so templated updates can reuse the non-templated
// validate/record paths. For push-descriptor templates, the layout is taken from push_layout
// instead of the template's own descriptorSetLayout.
cvdescriptorset::DecodedTemplateUpdate::DecodedTemplateUpdate(layer_data *device_data, VkDescriptorSet descriptorSet,
                                                              const TEMPLATE_STATE *template_state, const void *pData,
                                                              VkDescriptorSetLayout push_layout) {
    auto const &create_info = template_state->create_info;
    inline_infos.resize(create_info.descriptorUpdateEntryCount);  // Make sure we have one if we need it
    desc_writes.reserve(create_info.descriptorUpdateEntryCount);  // emplaced, so reserved without initialization
    // Push-descriptor templates carry no descriptorSetLayout of their own; use the caller-supplied one.
    VkDescriptorSetLayout effective_dsl = create_info.templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET
                                              ? create_info.descriptorSetLayout
                                              : push_layout;
    auto layout_obj = GetDescriptorSetLayout(device_data, effective_dsl);
    // Create a WriteDescriptorSet struct for each template update entry
    for (uint32_t i = 0; i < create_info.descriptorUpdateEntryCount; i++) {
        auto binding_count = layout_obj->GetDescriptorCountFromBinding(create_info.pDescriptorUpdateEntries[i].dstBinding);
        auto binding_being_updated = create_info.pDescriptorUpdateEntries[i].dstBinding;
        auto dst_array_element = create_info.pDescriptorUpdateEntries[i].dstArrayElement;
        desc_writes.reserve(desc_writes.size() + create_info.pDescriptorUpdateEntries[i].descriptorCount);
        for (uint32_t j = 0; j < create_info.pDescriptorUpdateEntries[i].descriptorCount; j++) {
            desc_writes.emplace_back();
            auto &write_entry = desc_writes.back();
            // Each source element lives at entry.offset + j * entry.stride in the caller's blob.
            size_t offset = create_info.pDescriptorUpdateEntries[i].offset + j * create_info.pDescriptorUpdateEntries[i].stride;
            char *update_entry = (char *)(pData) + offset;
            // When the array element runs off the end of the current binding, roll over into the
            // next valid binding, restarting at array element 0.
            // NOTE(review): binding_count is not refreshed after rolling over, so the rollover check
            // keeps using the first binding's count — confirm this is intended for multi-binding spans.
            if (dst_array_element >= binding_count) {
                dst_array_element = 0;
                binding_being_updated = layout_obj->GetNextValidBinding(binding_being_updated);
            }
            write_entry.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
            write_entry.pNext = NULL;
            write_entry.dstSet = descriptorSet;
            write_entry.dstBinding = binding_being_updated;
            write_entry.dstArrayElement = dst_array_element;
            write_entry.descriptorCount = 1;
            write_entry.descriptorType = create_info.pDescriptorUpdateEntries[i].descriptorType;
            // Point the write entry's payload pointer at the decoded struct inside the raw blob.
            switch (create_info.pDescriptorUpdateEntries[i].descriptorType) {
                case VK_DESCRIPTOR_TYPE_SAMPLER:
                case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
                case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
                case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
                case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
                    write_entry.pImageInfo = reinterpret_cast<VkDescriptorImageInfo *>(update_entry);
                    break;
                case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
                case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
                case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
                case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
                    write_entry.pBufferInfo = reinterpret_cast<VkDescriptorBufferInfo *>(update_entry);
                    break;
                case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
                case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
                    write_entry.pTexelBufferView = reinterpret_cast<VkBufferView *>(update_entry);
                    break;
                case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT: {
                    // For inline uniform blocks, descriptorCount is a byte count; the whole entry is
                    // described by a single write with an attached inline-info chained struct.
                    // NOTE(review): write_entry.descriptorCount stays 1 here while dataSize covers the
                    // full entry — confirm consumers read the pNext inline info, not descriptorCount.
                    VkWriteDescriptorSetInlineUniformBlockEXT *inline_info = &inline_infos[i];
                    inline_info->sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT;
                    inline_info->pNext = nullptr;
                    inline_info->dataSize = create_info.pDescriptorUpdateEntries[i].descriptorCount;
                    inline_info->pData = update_entry;
                    write_entry.pNext = inline_info;
                    // skip the rest of the array, they just represent bytes in the update
                    j = create_info.pDescriptorUpdateEntries[i].descriptorCount;
                    break;
                }
                default:
                    assert(0);
                    break;
            }
            dst_array_element++;
        }
    }
}
// Validate a templated descriptor update by decoding the template data into ordinary
// VkWriteDescriptorSet structs and running them through the non-template validation helper.
bool cvdescriptorset::ValidateUpdateDescriptorSetsWithTemplateKHR(layer_data *device_data, VkDescriptorSet descriptorSet,
                                                                  const TEMPLATE_STATE *template_state, const void *pData) {
    DecodedTemplateUpdate decoded_update(device_data, descriptorSet, template_state, pData);
    const auto write_count = static_cast<uint32_t>(decoded_update.desc_writes.size());
    return ValidateUpdateDescriptorSets(GetReportData(device_data), device_data, write_count, decoded_update.desc_writes.data(),
                                        0, NULL, "vkUpdateDescriptorSetWithTemplate()");
}
// Record a templated descriptor update by decoding the template data into ordinary
// VkWriteDescriptorSet structs and applying them through the non-template update helper.
void cvdescriptorset::PerformUpdateDescriptorSetsWithTemplateKHR(layer_data *device_data, VkDescriptorSet descriptorSet,
                                                                 const TEMPLATE_STATE *template_state, const void *pData) {
    DecodedTemplateUpdate decoded_update(device_data, descriptorSet, template_state, pData);
    const auto write_count = static_cast<uint32_t>(decoded_update.desc_writes.size());
    PerformUpdateDescriptorSets(device_data, write_count, decoded_update.desc_writes.data(), 0, NULL);
}
// Build a human-readable description of this set and its layout for error messages.
// Push descriptor sets have no VkDescriptorSet handle, so only the layout is named for them.
std::string cvdescriptorset::DescriptorSet::StringifySetAndLayout() const {
    std::string out;
    uint64_t layout_handle = HandleToUint64(p_layout_->GetDescriptorSetLayout());
    if (IsPushDescriptor()) {
        string_sprintf(&out, "Push Descriptors defined with VkDescriptorSetLayout 0x%" PRIxLEAST64, layout_handle);
    } else {
        // Bug fix: the format string was missing the space before "allocated", producing messages
        // like "VkDescriptorSet 0x1234allocated with ...".
        string_sprintf(&out, "VkDescriptorSet 0x%" PRIxLEAST64 " allocated with VkDescriptorSetLayout 0x%" PRIxLEAST64,
                       HandleToUint64(set_), layout_handle);
    }
    return out;
}
// Validate every write in a push-descriptor update against this set's layout.
// dstSet in each VkWriteDescriptorSet is ignored (push descriptors have no set handle).
// Returns true when any write failed validation.
bool cvdescriptorset::DescriptorSet::ValidatePushDescriptorsUpdate(const debug_report_data *report_data, uint32_t write_count,
                                                                   const VkWriteDescriptorSet *p_wds, const char *func_name) {
    assert(IsPushDescriptor());
    bool skip = false;
    for (uint32_t wi = 0; wi < write_count; ++wi) {
        std::string error_code;
        std::string error_str;
        const bool valid = ValidateWriteUpdate(report_data, &p_wds[wi], func_name, &error_code, &error_str);
        if (valid) continue;
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
                        HandleToUint64(p_layout_->GetDescriptorSetLayout()), error_code, "%s failed update validation: %s.",
                        func_name, error_str.c_str());
    }
    return skip;
}
// Validate the state for a given write update but don't actually perform the update
// If an error would occur for this update, return false and fill in details in error_msg string
// Checks, in order: dst layout not destroyed; dst binding exists and is non-empty; set not in use
// by a command buffer (unless the binding allows update-while-pending / update-after-bind);
// descriptor type matches the layout; the write fits in the available consecutive descriptors;
// inline-uniform-block alignment rules; cross-binding consistency; and finally the update contents.
bool cvdescriptorset::DescriptorSet::ValidateWriteUpdate(const debug_report_data *report_data, const VkWriteDescriptorSet *update,
                                                         const char *func_name, std::string *error_code, std::string *error_msg) {
    // Verify dst layout still valid
    if (p_layout_->IsDestroyed()) {
        *error_code = "VUID-VkWriteDescriptorSet-dstSet-00320";
        string_sprintf(error_msg, "Cannot call %s to perform write update on %s which has been destroyed", func_name,
                       StringifySetAndLayout().c_str());
        return false;
    }
    // Verify dst binding exists
    if (!p_layout_->HasBinding(update->dstBinding)) {
        *error_code = "VUID-VkWriteDescriptorSet-dstBinding-00315";
        std::stringstream error_str;
        error_str << StringifySetAndLayout() << " does not have binding " << update->dstBinding;
        *error_msg = error_str.str();
        return false;
    } else {
        // Make sure binding isn't empty
        if (0 == p_layout_->GetDescriptorCountFromBinding(update->dstBinding)) {
            *error_code = "VUID-VkWriteDescriptorSet-dstBinding-00316";
            std::stringstream error_str;
            error_str << StringifySetAndLayout() << " cannot updated binding " << update->dstBinding << " that has 0 descriptors";
            *error_msg = error_str.str();
            return false;
        }
    }
    // Verify idle ds: a set bound to an in-flight command buffer may only be written if the binding
    // was created with an update-while-pending or update-after-bind flag.
    if (in_use.load() &&
        !(p_layout_->GetDescriptorBindingFlagsFromBinding(update->dstBinding) &
          (VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT | VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT))) {
        // TODO : Re-using Free Idle error code, need write update idle error code
        *error_code = "VUID-vkFreeDescriptorSets-pDescriptorSets-00309";
        std::stringstream error_str;
        error_str << "Cannot call " << func_name << " to perform write update on " << StringifySetAndLayout()
                  << " that is in use by a command buffer";
        *error_msg = error_str.str();
        return false;
    }
    // We know that binding is valid, verify update and do update on each descriptor
    // start_idx is the flat index of the first descriptor touched, across all bindings of the set.
    auto start_idx = p_layout_->GetGlobalIndexRangeFromBinding(update->dstBinding).start + update->dstArrayElement;
    auto type = p_layout_->GetTypeFromBinding(update->dstBinding);
    if (type != update->descriptorType) {
        *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00319";
        std::stringstream error_str;
        error_str << "Attempting write update to " << StringifySetAndLayout() << " binding #" << update->dstBinding << " with type "
                  << string_VkDescriptorType(type) << " but update type is " << string_VkDescriptorType(update->descriptorType);
        *error_msg = error_str.str();
        return false;
    }
    // The write (including any roll-over into later bindings) must fit within the set's descriptors.
    if (update->descriptorCount > (descriptors_.size() - start_idx)) {
        *error_code = "VUID-VkWriteDescriptorSet-dstArrayElement-00321";
        std::stringstream error_str;
        error_str << "Attempting write update to " << StringifySetAndLayout() << " binding #" << update->dstBinding << " with "
                  << descriptors_.size() - start_idx
                  << " descriptors in that binding and all successive bindings of the set, but update of "
                  << update->descriptorCount << " descriptors combined with update array element offset of "
                  << update->dstArrayElement << " oversteps the available number of consecutive descriptors";
        *error_msg = error_str.str();
        return false;
    }
    // Inline uniform block writes are byte-addressed: offset and size must be 4-byte aligned and the
    // chained VkWriteDescriptorSetInlineUniformBlockEXT dataSize must match descriptorCount.
    if (type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
        if ((update->dstArrayElement % 4) != 0) {
            *error_code = "VUID-VkWriteDescriptorSet-descriptorType-02219";
            std::stringstream error_str;
            error_str << "Attempting write update to " << StringifySetAndLayout() << " binding #" << update->dstBinding << " with "
                      << "dstArrayElement " << update->dstArrayElement << " not a multiple of 4";
            *error_msg = error_str.str();
            return false;
        }
        if ((update->descriptorCount % 4) != 0) {
            *error_code = "VUID-VkWriteDescriptorSet-descriptorType-02220";
            std::stringstream error_str;
            error_str << "Attempting write update to " << StringifySetAndLayout() << " binding #" << update->dstBinding << " with "
                      << "descriptorCount " << update->descriptorCount << " not a multiple of 4";
            *error_msg = error_str.str();
            return false;
        }
        const auto *write_inline_info = lvl_find_in_chain<VkWriteDescriptorSetInlineUniformBlockEXT>(update->pNext);
        if (!write_inline_info || write_inline_info->dataSize != update->descriptorCount) {
            *error_code = "VUID-VkWriteDescriptorSet-descriptorType-02221";
            std::stringstream error_str;
            if (!write_inline_info) {
                error_str << "Attempting write update to " << StringifySetAndLayout() << " binding #" << update->dstBinding
                          << " with "
                          << "VkWriteDescriptorSetInlineUniformBlockEXT missing";
            } else {
                error_str << "Attempting write update to " << StringifySetAndLayout() << " binding #" << update->dstBinding
                          << " with "
                          << "VkWriteDescriptorSetInlineUniformBlockEXT dataSize " << write_inline_info->dataSize
                          << " not equal to "
                          << "VkWriteDescriptorSet descriptorCount " << update->descriptorCount;
            }
            *error_msg = error_str.str();
            return false;
        }
        // This error is probably unreachable due to the previous two errors
        if (write_inline_info && (write_inline_info->dataSize % 4) != 0) {
            *error_code = "VUID-VkWriteDescriptorSetInlineUniformBlockEXT-dataSize-02222";
            std::stringstream error_str;
            error_str << "Attempting write update to " << StringifySetAndLayout() << " binding #" << update->dstBinding << " with "
                      << "VkWriteDescriptorSetInlineUniformBlockEXT dataSize " << write_inline_info->dataSize
                      << " not a multiple of 4";
            *error_msg = error_str.str();
            return false;
        }
    }
    // Verify consecutive bindings match (if needed)
    if (!p_layout_->VerifyUpdateConsistency(update->dstBinding, update->dstArrayElement, update->descriptorCount, "write update to",
                                            set_, error_msg)) {
        // TODO : Should break out "consecutive binding updates" language into valid usage statements
        *error_code = "VUID-VkWriteDescriptorSet-dstArrayElement-00321";
        return false;
    }
    // Update is within bounds and consistent so last step is to validate update contents
    if (!VerifyWriteUpdateContents(update, start_idx, func_name, error_code, error_msg)) {
        std::stringstream error_str;
        error_str << "Write update to " << StringifySetAndLayout() << " binding #" << update->dstBinding
                  << " failed with error message: " << error_msg->c_str();
        *error_msg = error_str.str();
        return false;
    }
    // All checks passed, update is clean
    return true;
}
// For the given buffer, verify that its creation parameters are appropriate for the given type.
// Each buffer-backed descriptor type requires the buffer to have been created with a matching
// VK_BUFFER_USAGE_* bit. If there's an error, fill error_code/error_msg with details and return
// false, else return true.
bool cvdescriptorset::DescriptorSet::ValidateBufferUsage(BUFFER_STATE const *buffer_node, VkDescriptorType type,
                                                         std::string *error_code, std::string *error_msg) const {
    // Verify that usage bits set correctly for given type
    auto usage = buffer_node->createInfo.usage;
    std::string error_usage_bit;
    switch (type) {
        case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
            if (!(usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT)) {
                *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00334";
                error_usage_bit = "VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT";
            }
            break;
        case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
            if (!(usage & VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT)) {
                *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00335";
                error_usage_bit = "VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT";
            }
            break;
        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
            if (!(usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT)) {
                *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00330";
                error_usage_bit = "VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT";
            }
            break;
        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
            if (!(usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT)) {
                *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00331";
                error_usage_bit = "VK_BUFFER_USAGE_STORAGE_BUFFER_BIT";
            }
            break;
        default:
            // Non-buffer descriptor types have no buffer usage requirement here.
            break;
    }
    if (!error_usage_bit.empty()) {
        std::stringstream error_str;
        // Bug fix: the usage mask was printed in decimal after a "0x" prefix; switch the stream to
        // hex for the mask (and back to decimal afterwards) so the message reads correctly.
        error_str << "Buffer (" << buffer_node->buffer << ") with usage mask 0x" << std::hex << usage << std::dec
                  << " being used for a descriptor update of type " << string_VkDescriptorType(type) << " does not have "
                  << error_usage_bit << " set.";
        *error_msg = error_str.str();
        return false;
    }
    return true;
}
// For buffer descriptor updates, verify the buffer usage and VkDescriptorBufferInfo struct which includes:
// 1. buffer is valid and has memory bound
// 2. buffer was created with correct usage flags
// 3. offset is less than buffer size
// 4. range is either VK_WHOLE_SIZE or falls in (0, (buffer size - offset)]
// 5. range and offset are within the device's maxUniformBufferRange / maxStorageBufferRange limits
// If there's an error, update the error_msg string with details and return false, else return true
bool cvdescriptorset::DescriptorSet::ValidateBufferUpdate(VkDescriptorBufferInfo const *buffer_info, VkDescriptorType type,
                                                          const char *func_name, std::string *error_code,
                                                          std::string *error_msg) const {
    // First make sure that buffer is valid
    auto buffer_node = GetBufferState(device_data_, buffer_info->buffer);
    // Any invalid buffer should already be caught by object_tracker
    assert(buffer_node);
    // ValidateMemoryIsBoundToBuffer returns true when it flagged an error (no memory bound).
    if (ValidateMemoryIsBoundToBuffer(device_data_, buffer_node, func_name, "VUID-VkWriteDescriptorSet-descriptorType-00329")) {
        *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00329";
        *error_msg = "No memory bound to buffer.";
        return false;
    }
    // Verify usage bits
    if (!ValidateBufferUsage(buffer_node, type, error_code, error_msg)) {
        // error_msg will have been updated by ValidateBufferUsage()
        return false;
    }
    // offset must be less than buffer size
    if (buffer_info->offset >= buffer_node->createInfo.size) {
        *error_code = "VUID-VkDescriptorBufferInfo-offset-00340";
        std::stringstream error_str;
        error_str << "VkDescriptorBufferInfo offset of " << buffer_info->offset << " is greater than or equal to buffer "
                  << buffer_node->buffer << " size of " << buffer_node->createInfo.size;
        *error_msg = error_str.str();
        return false;
    }
    if (buffer_info->range != VK_WHOLE_SIZE) {
        // Range must be VK_WHOLE_SIZE or > 0
        if (!buffer_info->range) {
            *error_code = "VUID-VkDescriptorBufferInfo-range-00341";
            std::stringstream error_str;
            error_str << "VkDescriptorBufferInfo range is not VK_WHOLE_SIZE and is zero, which is not allowed.";
            *error_msg = error_str.str();
            return false;
        }
        // Range must be VK_WHOLE_SIZE or <= (buffer size - offset)
        if (buffer_info->range > (buffer_node->createInfo.size - buffer_info->offset)) {
            *error_code = "VUID-VkDescriptorBufferInfo-range-00342";
            std::stringstream error_str;
            error_str << "VkDescriptorBufferInfo range is " << buffer_info->range << " which is greater than buffer size ("
                      << buffer_node->createInfo.size << ") minus requested offset of " << buffer_info->offset;
            *error_msg = error_str.str();
            return false;
        }
    }
    // Check buffer update sizes against device limits; VK_WHOLE_SIZE is checked via the
    // effective range (buffer size - offset).
    if (VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER == type || VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC == type) {
        auto max_ub_range = limits_.maxUniformBufferRange;
        if (buffer_info->range != VK_WHOLE_SIZE && buffer_info->range > max_ub_range) {
            *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00332";
            std::stringstream error_str;
            error_str << "VkDescriptorBufferInfo range is " << buffer_info->range
                      << " which is greater than this device's maxUniformBufferRange (" << max_ub_range << ")";
            *error_msg = error_str.str();
            return false;
        } else if (buffer_info->range == VK_WHOLE_SIZE && (buffer_node->createInfo.size - buffer_info->offset) > max_ub_range) {
            *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00332";
            std::stringstream error_str;
            error_str << "VkDescriptorBufferInfo range is VK_WHOLE_SIZE but effective range "
                      << "(" << (buffer_node->createInfo.size - buffer_info->offset) << ") is greater than this device's "
                      << "maxUniformBufferRange (" << max_ub_range << ")";
            *error_msg = error_str.str();
            return false;
        }
    } else if (VK_DESCRIPTOR_TYPE_STORAGE_BUFFER == type || VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC == type) {
        auto max_sb_range = limits_.maxStorageBufferRange;
        if (buffer_info->range != VK_WHOLE_SIZE && buffer_info->range > max_sb_range) {
            *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00333";
            std::stringstream error_str;
            error_str << "VkDescriptorBufferInfo range is " << buffer_info->range
                      << " which is greater than this device's maxStorageBufferRange (" << max_sb_range << ")";
            *error_msg = error_str.str();
            return false;
        } else if (buffer_info->range == VK_WHOLE_SIZE && (buffer_node->createInfo.size - buffer_info->offset) > max_sb_range) {
            *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00333";
            std::stringstream error_str;
            error_str << "VkDescriptorBufferInfo range is VK_WHOLE_SIZE but effective range "
                      << "(" << (buffer_node->createInfo.size - buffer_info->offset) << ") is greater than this device's "
                      << "maxStorageBufferRange (" << max_sb_range << ")";
            *error_msg = error_str.str();
            return false;
        }
    }
    return true;
}
// Verify that the contents of the update are ok, but don't perform actual update.
// 'index' is the flat start index of the first descriptor being written within descriptors_.
// On failure, fills error_code/error_msg and returns false; returns true when contents are clean.
bool cvdescriptorset::DescriptorSet::VerifyWriteUpdateContents(const VkWriteDescriptorSet *update, const uint32_t index,
                                                               const char *func_name, std::string *error_code,
                                                               std::string *error_msg) const {
    switch (update->descriptorType) {
        case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: {
            // Combined image+sampler: validate the image part here, then fall through to the
            // SAMPLER case below to validate the sampler part as well.
            for (uint32_t di = 0; di < update->descriptorCount; ++di) {
                // Validate image
                auto image_view = update->pImageInfo[di].imageView;
                auto image_layout = update->pImageInfo[di].imageLayout;
                if (!ValidateImageUpdate(image_view, image_layout, update->descriptorType, device_data_, func_name, error_code,
                                         error_msg)) {
                    std::stringstream error_str;
                    error_str << "Attempted write update to combined image sampler descriptor failed due to: "
                              << error_msg->c_str();
                    *error_msg = error_str.str();
                    return false;
                }
            }
        }
        // fall through
        case VK_DESCRIPTOR_TYPE_SAMPLER: {
            for (uint32_t di = 0; di < update->descriptorCount; ++di) {
                // Immutable samplers come from the layout, so the update's sampler handle is ignored.
                if (!descriptors_[index + di].get()->IsImmutableSampler()) {
                    if (!ValidateSampler(update->pImageInfo[di].sampler, device_data_)) {
                        *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00325";
                        std::stringstream error_str;
                        error_str << "Attempted write update to sampler descriptor with invalid sampler: "
                                  << update->pImageInfo[di].sampler << ".";
                        *error_msg = error_str.str();
                        return false;
                    }
                } else {
                    // TODO : Warn here
                }
            }
            break;
        }
        case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
        case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
        case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
            for (uint32_t di = 0; di < update->descriptorCount; ++di) {
                auto image_view = update->pImageInfo[di].imageView;
                auto image_layout = update->pImageInfo[di].imageLayout;
                if (!ValidateImageUpdate(image_view, image_layout, update->descriptorType, device_data_, func_name, error_code,
                                         error_msg)) {
                    std::stringstream error_str;
                    error_str << "Attempted write update to image descriptor failed due to: " << error_msg->c_str();
                    *error_msg = error_str.str();
                    return false;
                }
            }
            break;
        }
        case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
        case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
            for (uint32_t di = 0; di < update->descriptorCount; ++di) {
                auto buffer_view = update->pTexelBufferView[di];
                auto bv_state = GetBufferViewState(device_data_, buffer_view);
                if (!bv_state) {
                    *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00323";
                    std::stringstream error_str;
                    error_str << "Attempted write update to texel buffer descriptor with invalid buffer view: " << buffer_view;
                    *error_msg = error_str.str();
                    return false;
                }
                auto buffer = bv_state->create_info.buffer;
                auto buffer_state = GetBufferState(device_data_, buffer);
                // Verify that buffer underlying the view hasn't been destroyed prematurely
                if (!buffer_state) {
                    *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00323";
                    std::stringstream error_str;
                    error_str << "Attempted write update to texel buffer descriptor failed because underlying buffer (" << buffer
                              << ") has been destroyed: " << error_msg->c_str();
                    *error_msg = error_str.str();
                    return false;
                } else if (!ValidateBufferUsage(buffer_state, update->descriptorType, error_code, error_msg)) {
                    std::stringstream error_str;
                    error_str << "Attempted write update to texel buffer descriptor failed due to: " << error_msg->c_str();
                    *error_msg = error_str.str();
                    return false;
                }
            }
            break;
        }
        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
            for (uint32_t di = 0; di < update->descriptorCount; ++di) {
                if (!ValidateBufferUpdate(update->pBufferInfo + di, update->descriptorType, func_name, error_code, error_msg)) {
                    std::stringstream error_str;
                    error_str << "Attempted write update to buffer descriptor failed due to: " << error_msg->c_str();
                    *error_msg = error_str.str();
                    return false;
                }
            }
            break;
        }
        case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
            // Inline uniform data is raw bytes; alignment/size rules are checked in ValidateWriteUpdate.
            break;
        case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV:
            // XXX TODO
            break;
        default:
            assert(0);  // We've already verified update type so should never get here
            break;
    }
    // All checks passed so update contents are good
    return true;
}
// Verify that the contents of a copy update are ok, but don't perform actual update.
// 'index' is the flat start index of the first source descriptor within src_set->descriptors_.
// Source descriptors that were never written (!updated) are skipped rather than validated.
bool cvdescriptorset::DescriptorSet::VerifyCopyUpdateContents(const VkCopyDescriptorSet *update, const DescriptorSet *src_set,
                                                              VkDescriptorType type, uint32_t index, const char *func_name,
                                                              std::string *error_code, std::string *error_msg) const {
    // Note : Repurposing some Write update error codes here as specific details aren't called out for copy updates like they are
    // for write updates
    switch (src_set->descriptors_[index]->descriptor_class) {
        case PlainSampler: {
            for (uint32_t di = 0; di < update->descriptorCount; ++di) {
                const auto src_desc = src_set->descriptors_[index + di].get();
                if (!src_desc->updated) continue;
                // Immutable samplers come from the layout; only dynamic samplers carry a handle to check.
                if (!src_desc->IsImmutableSampler()) {
                    auto update_sampler = static_cast<SamplerDescriptor *>(src_desc)->GetSampler();
                    if (!ValidateSampler(update_sampler, device_data_)) {
                        *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00325";
                        std::stringstream error_str;
                        error_str << "Attempted copy update to sampler descriptor with invalid sampler: " << update_sampler << ".";
                        *error_msg = error_str.str();
                        return false;
                    }
                } else {
                    // TODO : Warn here
                }
            }
            break;
        }
        case ImageSampler: {
            for (uint32_t di = 0; di < update->descriptorCount; ++di) {
                const auto src_desc = src_set->descriptors_[index + di].get();
                if (!src_desc->updated) continue;
                auto img_samp_desc = static_cast<const ImageSamplerDescriptor *>(src_desc);
                // First validate sampler
                if (!img_samp_desc->IsImmutableSampler()) {
                    auto update_sampler = img_samp_desc->GetSampler();
                    if (!ValidateSampler(update_sampler, device_data_)) {
                        *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00325";
                        std::stringstream error_str;
                        error_str << "Attempted copy update to sampler descriptor with invalid sampler: " << update_sampler << ".";
                        *error_msg = error_str.str();
                        return false;
                    }
                } else {
                    // TODO : Warn here
                }
                // Validate image
                auto image_view = img_samp_desc->GetImageView();
                auto image_layout = img_samp_desc->GetImageLayout();
                if (!ValidateImageUpdate(image_view, image_layout, type, device_data_, func_name, error_code, error_msg)) {
                    std::stringstream error_str;
                    error_str << "Attempted copy update to combined image sampler descriptor failed due to: " << error_msg->c_str();
                    *error_msg = error_str.str();
                    return false;
                }
            }
            break;
        }
        case Image: {
            for (uint32_t di = 0; di < update->descriptorCount; ++di) {
                const auto src_desc = src_set->descriptors_[index + di].get();
                if (!src_desc->updated) continue;
                auto img_desc = static_cast<const ImageDescriptor *>(src_desc);
                auto image_view = img_desc->GetImageView();
                auto image_layout = img_desc->GetImageLayout();
                if (!ValidateImageUpdate(image_view, image_layout, type, device_data_, func_name, error_code, error_msg)) {
                    std::stringstream error_str;
                    error_str << "Attempted copy update to image descriptor failed due to: " << error_msg->c_str();
                    *error_msg = error_str.str();
                    return false;
                }
            }
            break;
        }
        case TexelBuffer: {
            for (uint32_t di = 0; di < update->descriptorCount; ++di) {
                const auto src_desc = src_set->descriptors_[index + di].get();
                if (!src_desc->updated) continue;
                auto buffer_view = static_cast<TexelDescriptor *>(src_desc)->GetBufferView();
                auto bv_state = GetBufferViewState(device_data_, buffer_view);
                if (!bv_state) {
                    *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00323";
                    std::stringstream error_str;
                    error_str << "Attempted copy update to texel buffer descriptor with invalid buffer view: " << buffer_view;
                    *error_msg = error_str.str();
                    return false;
                }
                auto buffer = bv_state->create_info.buffer;
                if (!ValidateBufferUsage(GetBufferState(device_data_, buffer), type, error_code, error_msg)) {
                    std::stringstream error_str;
                    error_str << "Attempted copy update to texel buffer descriptor failed due to: " << error_msg->c_str();
                    *error_msg = error_str.str();
                    return false;
                }
            }
            break;
        }
        case GeneralBuffer: {
            for (uint32_t di = 0; di < update->descriptorCount; ++di) {
                const auto src_desc = src_set->descriptors_[index + di].get();
                if (!src_desc->updated) continue;
                auto buffer = static_cast<BufferDescriptor *>(src_desc)->GetBuffer();
                if (!ValidateBufferUsage(GetBufferState(device_data_, buffer), type, error_code, error_msg)) {
                    std::stringstream error_str;
                    error_str << "Attempted copy update to buffer descriptor failed due to: " << error_msg->c_str();
                    *error_msg = error_str.str();
                    return false;
                }
            }
            break;
        }
        case InlineUniform:
        case AccelerationStructure:
            // No per-descriptor contents to validate for these classes on the copy path.
            break;
        default:
            assert(0);  // We've already verified update type so should never get here
            break;
    }
    // All checks passed so update contents are good
    return true;
}
// Fill in the common AllocateDescriptorSetsData: record each requested set's layout node and
// accumulate, per descriptor type, the total number of descriptors these allocations will consume.
void cvdescriptorset::UpdateAllocateDescriptorSetsData(const layer_data *dev_data, const VkDescriptorSetAllocateInfo *p_alloc_info,
                                                       AllocateDescriptorSetsData *ds_data) {
    for (uint32_t set_idx = 0; set_idx < p_alloc_info->descriptorSetCount; ++set_idx) {
        auto layout = GetDescriptorSetLayout(dev_data, p_alloc_info->pSetLayouts[set_idx]);
        if (!layout) {
            // Unknown layouts are flagged as errors later, during ValidateAllocateDescriptorSets()
            continue;
        }
        ds_data->layout_nodes[set_idx] = layout;
        // Count total descriptors required per type across all of this layout's bindings
        for (uint32_t binding_idx = 0; binding_idx < layout->GetBindingCount(); ++binding_idx) {
            const auto &binding_layout = layout->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
            const auto type_index = static_cast<uint32_t>(binding_layout->descriptorType);
            ds_data->required_descriptors_by_type[type_index] += binding_layout->descriptorCount;
        }
    }
}
// Verify that the state at allocate time is correct, but don't actually allocate the sets yet
bool cvdescriptorset::ValidateAllocateDescriptorSets(const core_validation::layer_data *dev_data,
const VkDescriptorSetAllocateInfo *p_alloc_info,
const AllocateDescriptorSetsData *ds_data) {
bool skip = false;
auto report_data = core_validation::GetReportData(dev_data);
auto pool_state = GetDescriptorPoolState(dev_data, p_alloc_info->descriptorPool);
for (uint32_t i = 0; i < p_alloc_info->descriptorSetCount; i++) {
auto layout = GetDescriptorSetLayout(dev_data, p_alloc_info->pSetLayouts[i]);
if (layout) { // nullptr layout indicates no valid layout handle for this device, validated/logged in object_tracker
if (layout->IsPushDescriptor()) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
HandleToUint64(p_alloc_info->pSetLayouts[i]), "VUID-VkDescriptorSetAllocateInfo-pSetLayouts-00308",
"Layout 0x%" PRIxLEAST64 " specified at pSetLayouts[%" PRIu32
"] in vkAllocateDescriptorSets() was created with invalid flag %s set.",
HandleToUint64(p_alloc_info->pSetLayouts[i]), i,
"VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR");
}
if (layout->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT &&
!(pool_state->createInfo.flags & VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
0, "VUID-VkDescriptorSetAllocateInfo-pSetLayouts-03044",
"Descriptor set layout create flags and pool create flags mismatch for index (%d)", i);
}
}
}
if (!GetDeviceExtensions(dev_data)->vk_khr_maintenance1) {
// Track number of descriptorSets allowable in this pool
if (pool_state->availableSets < p_alloc_info->descriptorSetCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
HandleToUint64(pool_state->pool), "VUID-VkDescriptorSetAllocateInfo-descriptorSetCount-00306",
"Unable to allocate %u descriptorSets from pool 0x%" PRIxLEAST64
". This pool only has %d descriptorSets remaining.",
p_alloc_info->descriptorSetCount, HandleToUint64(pool_state->pool), pool_state->availableSets);
}
// Determine whether descriptor counts are satisfiable
for (auto it = ds_data->required_descriptors_by_type.begin(); it != ds_data->required_descriptors_by_type.end(); ++it) {
if (ds_data->required_descriptors_by_type.at(it->first) > pool_state->availableDescriptorTypeCount[it->first]) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
HandleToUint64(pool_state->pool), "VUID-VkDescriptorSetAllocateInfo-descriptorPool-00307",
"Unable to allocate %u descriptors of type %s from pool 0x%" PRIxLEAST64
". This pool only has %d descriptors of this type remaining.",
ds_data->required_descriptors_by_type.at(it->first),
string_VkDescriptorType(VkDescriptorType(it->first)), HandleToUint64(pool_state->pool),
pool_state->availableDescriptorTypeCount[it->first]);
}
}
}
const auto *count_allocate_info = lvl_find_in_chain<VkDescriptorSetVariableDescriptorCountAllocateInfoEXT>(p_alloc_info->pNext);
if (count_allocate_info) {
if (count_allocate_info->descriptorSetCount != 0 &&
count_allocate_info->descriptorSetCount != p_alloc_info->descriptorSetCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, 0,
"VUID-VkDescriptorSetVariableDescriptorCountAllocateInfoEXT-descriptorSetCount-03045",
"VkDescriptorSetAllocateInfo::descriptorSetCount (%d) != "
"VkDescriptorSetVariableDescriptorCountAllocateInfoEXT::descriptorSetCount (%d)",
p_alloc_info->descriptorSetCount, count_allocate_info->descriptorSetCount);
}
if (count_allocate_info->descriptorSetCount == p_alloc_info->descriptorSetCount) {
for (uint32_t i = 0; i < p_alloc_info->descriptorSetCount; i++) {
auto layout = GetDescriptorSetLayout(dev_data, p_alloc_info->pSetLayouts[i]);
if (count_allocate_info->pDescriptorCounts[i] > layout->GetDescriptorCountFromBinding(layout->GetMaxBinding())) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, 0,
"VUID-VkDescriptorSetVariableDescriptorCountAllocateInfoEXT-pSetLayouts-03046",
"pDescriptorCounts[%d] = (%d), binding's descriptorCount = (%d)", i,
count_allocate_info->pDescriptorCounts[i], layout->GetDescriptorCountFromBinding(layout->GetMaxBinding()));
}
}
}
}
return skip;
}
// Decrement allocated sets from the pool and insert new sets into set_map
void cvdescriptorset::PerformAllocateDescriptorSets(const VkDescriptorSetAllocateInfo *p_alloc_info,
const VkDescriptorSet *descriptor_sets,
const AllocateDescriptorSetsData *ds_data,
std::unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_STATE *> *pool_map,
std::unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> *set_map,
layer_data *dev_data) {
auto pool_state = (*pool_map)[p_alloc_info->descriptorPool];
// Account for sets and individual descriptors allocated from pool
pool_state->availableSets -= p_alloc_info->descriptorSetCount;
for (auto it = ds_data->required_descriptors_by_type.begin(); it != ds_data->required_descriptors_by_type.end(); ++it) {
pool_state->availableDescriptorTypeCount[it->first] -= ds_data->required_descriptors_by_type.at(it->first);
}
const auto *variable_count_info = lvl_find_in_chain<VkDescriptorSetVariableDescriptorCountAllocateInfoEXT>(p_alloc_info->pNext);
bool variable_count_valid = variable_count_info && variable_count_info->descriptorSetCount == p_alloc_info->descriptorSetCount;
// Create tracking object for each descriptor set; insert into global map and the pool's set.
for (uint32_t i = 0; i < p_alloc_info->descriptorSetCount; i++) {
uint32_t variable_count = variable_count_valid ? variable_count_info->pDescriptorCounts[i] : 0;
auto new_ds = new cvdescriptorset::DescriptorSet(descriptor_sets[i], p_alloc_info->descriptorPool, ds_data->layout_nodes[i],
variable_count, dev_data);
pool_state->sets.insert(new_ds);
new_ds->in_use.store(0);
(*set_map)[descriptor_sets[i]] = new_ds;
}
}
cvdescriptorset::PrefilterBindRequestMap::PrefilterBindRequestMap(cvdescriptorset::DescriptorSet &ds, const BindingReqMap &in_map,
GLOBAL_CB_NODE *cb_state)
: filtered_map_(), orig_map_(in_map) {
if (ds.GetTotalDescriptorCount() > kManyDescriptors_) {
filtered_map_.reset(new std::map<uint32_t, descriptor_req>());
ds.FilterAndTrackBindingReqs(cb_state, orig_map_, filtered_map_.get());
}
}
cvdescriptorset::PrefilterBindRequestMap::PrefilterBindRequestMap(cvdescriptorset::DescriptorSet &ds, const BindingReqMap &in_map,
GLOBAL_CB_NODE *cb_state, PIPELINE_STATE *pipeline)
: filtered_map_(), orig_map_(in_map) {
if (ds.GetTotalDescriptorCount() > kManyDescriptors_) {
filtered_map_.reset(new std::map<uint32_t, descriptor_req>());
ds.FilterAndTrackBindingReqs(cb_state, pipeline, orig_map_, filtered_map_.get());
}
}
| 1 | 9,477 | This crashed in an app using descriptor_indexing with some unbound descriptors. | KhronosGroup-Vulkan-ValidationLayers | cpp |
@@ -39,14 +39,15 @@ MODULES_TO_CHECK = [
MODULES_NAMES = [m[1] for m in MODULES_TO_CHECK]
[email protected]_stdlib
[email protected]
@pytest.mark.parametrize(
("test_module_location", "test_module_name"), MODULES_TO_CHECK, ids=MODULES_NAMES
)
-def test_lib_module_no_crash(
+def test_primer_stdlib_no_crash(
test_module_location: str, test_module_name: str, capsys: CaptureFixture
) -> None:
"""Test that pylint does not produces any crashes or fatal errors on stdlib modules"""
+ __tracebackhide__ = True # pylint: disable=unused-variable
os.chdir(test_module_location)
with _patch_stdout(io.StringIO()):
try: | 1 | # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
import contextlib
import io
import os
import sys
import pytest
from pytest import CaptureFixture
import pylint.lint
def is_module(filename: str) -> bool:
return filename.endswith(".py")
def is_package(filename: str, location: str) -> bool:
return os.path.exists(os.path.join(location, filename, "__init__.py"))
@contextlib.contextmanager
def _patch_stdout(out):
sys.stdout = out
try:
yield
finally:
sys.stdout = sys.__stdout__
LIB_DIRS = [os.path.dirname(os.__file__)]
MODULES_TO_CHECK = [
(location, module)
for location in LIB_DIRS
for module in os.listdir(location)
if is_module(module) or is_package(module, location)
]
MODULES_NAMES = [m[1] for m in MODULES_TO_CHECK]
@pytest.mark.primer_stdlib
@pytest.mark.parametrize(
("test_module_location", "test_module_name"), MODULES_TO_CHECK, ids=MODULES_NAMES
)
def test_lib_module_no_crash(
test_module_location: str, test_module_name: str, capsys: CaptureFixture
) -> None:
"""Test that pylint does not produces any crashes or fatal errors on stdlib modules"""
os.chdir(test_module_location)
with _patch_stdout(io.StringIO()):
try:
# We want to test all the code we can
enables = ["--enable-all-extensions", "--enable=all"]
# Duplicate code takes too long and is relatively safe
# We don't want to lint the test directory which are repetitive
disables = ["--disable=duplicate-code", "--ignore=test"]
pylint.lint.Run([test_module_name] + enables + disables)
except SystemExit as ex:
out, err = capsys.readouterr()
assert not err, err
assert not out
msg = f"Encountered {{}} during primer stlib test for {test_module_name}"
assert ex.code != 32, msg.format("a crash")
assert ex.code % 2 == 0, msg.format("a message of category 'fatal'")
| 1 | 16,852 | I think we might want to keep these marks separate? Then we can create a new `workflow` file with 3 jobs: 1) Prime stdlib 2) Prime batch 1 of external 2) Prime batch 2 of external | PyCQA-pylint | py |
@@ -263,6 +263,13 @@ namespace MvvmCross.Droid.Support.V4
{
((Android.Support.V4.App.Fragment)fragInfo.CachedFragment).Arguments.Clear();
((Android.Support.V4.App.Fragment)fragInfo.CachedFragment).Arguments.PutAll(bundle);
+
+ var childViewModelCache = Mvx.GetSingleton<IMvxChildViewModelCache>();
+ if (childViewModelCache.Exists(fragInfo.CachedFragment.ViewModel.GetType()))
+ {
+ fragInfo.CachedFragment.ViewModel = childViewModelCache.Get(fragInfo.CachedFragment.ViewModel.GetType());
+ childViewModelCache.Remove(fragInfo.CachedFragment.ViewModel.GetType());
+ }
}
else
{ | 1 | // MvxCachingFragmentActivity.cs
// (c) Copyright Cirrious Ltd. http://www.cirrious.com
// MvvmCross is licensed using Microsoft Public License (Ms-PL)
// Contributions and inspirations noted in readme.md and license.txt
//
// Project Lead - Stuart Lodge, @slodge, [email protected]
using System;
using System.Collections.Generic;
using System.Linq;
using Android.Content;
using Android.OS;
using Android.Runtime;
using Android.Support.V4.App;
using MvvmCross.Binding.Droid.BindingContext;
using MvvmCross.Core.ViewModels;
using MvvmCross.Core.Views;
using MvvmCross.Droid.Platform;
using MvvmCross.Droid.Shared.Attributes;
using MvvmCross.Droid.Shared.Caching;
using MvvmCross.Droid.Shared.Fragments;
using MvvmCross.Droid.Shared.Presenter;
using MvvmCross.Droid.Views;
using MvvmCross.Platform;
using MvvmCross.Platform.Exceptions;
using MvvmCross.Platform.Platform;
using Fragment = Android.Support.V4.App.Fragment;
namespace MvvmCross.Droid.Support.V4
{
[Register("mvvmcross.droid.support.v4.MvxCachingFragmentActivity")]
public class MvxCachingFragmentActivity : MvxFragmentActivity, IFragmentCacheableActivity, IMvxFragmentHost
{
public const string ViewModelRequestBundleKey = "__mvxViewModelRequest";
private const string SavedFragmentTypesKey = "__mvxSavedFragmentTypes";
private IFragmentCacheConfiguration _fragmentCacheConfiguration;
protected enum FragmentReplaceMode
{
NoReplace,
ReplaceFragment,
ReplaceFragmentAndViewModel
}
protected MvxCachingFragmentActivity()
{
}
protected MvxCachingFragmentActivity(IntPtr javaReference, JniHandleOwnership transfer)
: base(javaReference, transfer)
{}
protected override void OnCreate(Bundle bundle)
{
// Prevents crash when activity in background with history enable is reopened after
// Android does some auto memory management.
var setup = MvxAndroidSetupSingleton.EnsureSingletonAvailable(this);
setup.EnsureInitialized();
base.OnCreate(bundle);
var rootView = Window.DecorView.RootView;
EventHandler onGlobalLayout = null;
onGlobalLayout = (sender, args) =>
{
rootView.ViewTreeObserver.GlobalLayout -= onGlobalLayout;
ViewModel?.Appeared();
};
rootView.ViewTreeObserver.GlobalLayout += onGlobalLayout;
if (bundle == null)
HandleIntent(Intent);
else
{
IMvxJsonConverter serializer;
if (!Mvx.TryResolve(out serializer))
{
Mvx.Trace(
"Could not resolve IMvxJsonConverter, it is going to be hard to create ViewModel cache");
return;
}
FragmentCacheConfiguration.RestoreCacheConfiguration(bundle, serializer);
// Gabriel has blown his trumpet. Ressurect Fragments from the dead
RestoreFragmentsCache();
RestoreViewModelsFromBundle(serializer, bundle);
}
}
protected override void OnNewIntent(Intent intent)
{
base.OnNewIntent(intent);
HandleIntent(intent);
}
protected virtual void HandleIntent(Intent intent)
{
var fragmentRequestText = intent.Extras?.GetString(ViewModelRequestBundleKey);
if (fragmentRequestText == null)
return;
var converter = Mvx.Resolve<IMvxNavigationSerializer>();
var fragmentRequest = converter.Serializer.DeserializeObject<MvxViewModelRequest>(fragmentRequestText);
var mvxAndroidViewPresenter = Mvx.Resolve<IMvxAndroidViewPresenter>();
mvxAndroidViewPresenter.Show(fragmentRequest);
}
private static void RestoreViewModelsFromBundle(IMvxJsonConverter serializer, Bundle savedInstanceState)
{
IMvxSavedStateConverter savedStateConverter;
IMvxMultipleViewModelCache viewModelCache;
IMvxViewModelLoader viewModelLoader;
if (!Mvx.TryResolve(out savedStateConverter))
{
Mvx.Trace("Could not resolve IMvxSavedStateConverter, won't be able to convert saved state");
return;
}
if (!Mvx.TryResolve(out viewModelCache))
{
Mvx.Trace("Could not resolve IMvxMultipleViewModelCache, won't be able to convert saved state");
return;
}
if (!Mvx.TryResolve(out viewModelLoader))
{
Mvx.Trace("Could not resolve IMvxViewModelLoader, won't be able to load ViewModel for caching");
return;
}
// Harder ressurection, just in case we were killed to death.
var json = savedInstanceState.GetString(SavedFragmentTypesKey);
if (string.IsNullOrEmpty(json)) return;
var savedState = serializer.DeserializeObject<Dictionary<string, Type>>(json);
foreach (var item in savedState)
{
var bundle = savedInstanceState.GetBundle(item.Key);
if (bundle.IsEmpty) continue;
var mvxBundle = savedStateConverter.Read(bundle);
var request = MvxViewModelRequest.GetDefaultRequest(item.Value);
// repopulate the ViewModel with the SavedState and cache it.
var vm = viewModelLoader.LoadViewModel(request, mvxBundle);
viewModelCache.Cache(vm, item.Key);
}
}
private void RestoreFragmentsCache()
{
// See if Fragments were just sleeping, and repopulate the _lookup (which is accesed in GetFragmentInfoByTag)
// with references to them.
// we do not want to restore fragments which aren't tracked by our cache
foreach (var fragment in GetCurrentCacheableFragments())
{
// if used tag is proper tag such that:
// it is unique and immutable
// and fragment is properly registered
// then there must be exactly one matching value in _lookup fragment cache container
var fragmentTag = GetTagFromFragment(fragment);
var fragmentInfo = GetFragmentInfoByTag(fragmentTag);
fragmentInfo.CachedFragment = fragment as IMvxFragmentView;
}
}
private Dictionary<string, Type> CreateFragmentTypesDictionary(Bundle outState)
{
IMvxSavedStateConverter savedStateConverter;
if (!Mvx.TryResolve(out savedStateConverter))
{
return null;
}
var typesForKeys = new Dictionary<string, Type>();
var currentFragsInfo = GetCurrentCacheableFragmentsInfo();
foreach (var info in currentFragsInfo)
{
var fragment = info.CachedFragment as IMvxFragmentView;
if (fragment == null)
continue;
var mvxBundle = fragment.CreateSaveStateBundle();
var bundle = new Bundle();
savedStateConverter.Write(bundle, mvxBundle);
outState.PutBundle(info.Tag, bundle);
if(!typesForKeys.ContainsKey(info.Tag))
typesForKeys.Add(info.Tag, info.ViewModelType);
}
return typesForKeys;
}
protected virtual void ReplaceFragment(FragmentTransaction ft, IMvxCachedFragmentInfo fragInfo)
{
ft.Replace(fragInfo.ContentId, fragInfo.CachedFragment as Android.Support.V4.App.Fragment, fragInfo.Tag);
}
protected override void OnSaveInstanceState(Bundle outState)
{
base.OnSaveInstanceState(outState);
IMvxJsonConverter ser;
if (FragmentCacheConfiguration.HasAnyFragmentsRegisteredToCache && Mvx.TryResolve(out ser))
{
FragmentCacheConfiguration.SaveFragmentCacheConfigurationState(outState, ser);
var typesForKeys = CreateFragmentTypesDictionary(outState);
if (typesForKeys == null)
return;
var json = ser.SerializeObject(typesForKeys);
outState.PutString(SavedFragmentTypesKey, json);
}
}
/// <summary>
/// Show Fragment with a specific tag at a specific placeholder
/// </summary>
/// <param name="tag">The tag for the fragment to lookup</param>
/// <param name="contentId">Where you want to show the Fragment</param>
/// <param name="bundle">Bundle which usually contains a Serialized MvxViewModelRequest</param>
/// <param name="forceAddToBackStack">If you want to force add the fragment to the backstack so on backbutton it will go back to it. Note: This will override IMvxCachedFragmentInfo.AddToBackStack configuration.</param>
/// <param name="forceReplaceFragment">If you want the fragment to be re-created</param>
protected virtual void ShowFragment(string tag, int contentId, Bundle bundle, bool forceAddToBackStack = false, bool forceReplaceFragment = false)
{
IMvxCachedFragmentInfo fragInfo;
FragmentCacheConfiguration.TryGetValue(tag, out fragInfo);
IMvxCachedFragmentInfo currentFragInfo = null;
var currentFragment = SupportFragmentManager.FindFragmentById(contentId);
if (currentFragment != null)
FragmentCacheConfiguration.TryGetValue(currentFragment.Tag, out currentFragInfo);
if (fragInfo == null)
throw new MvxException("Could not find tag: {0} in cache, you need to register it first.", tag);
// We shouldn't replace the current fragment unless we really need to.
FragmentReplaceMode fragmentReplaceMode = FragmentReplaceMode.ReplaceFragmentAndViewModel;
if (!forceReplaceFragment)
fragmentReplaceMode = ShouldReplaceCurrentFragment(fragInfo, currentFragInfo, bundle);
if (fragmentReplaceMode == FragmentReplaceMode.NoReplace)
return;
var ft = SupportFragmentManager.BeginTransaction();
OnBeforeFragmentChanging(fragInfo, ft);
fragInfo.ContentId = contentId;
//If we already have a previously created fragment, we only need to send the new parameters
if (fragInfo.CachedFragment != null && fragmentReplaceMode == FragmentReplaceMode.ReplaceFragment)
{
((Android.Support.V4.App.Fragment)fragInfo.CachedFragment).Arguments.Clear();
((Android.Support.V4.App.Fragment)fragInfo.CachedFragment).Arguments.PutAll(bundle);
}
else
{
//Otherwise, create one and cache it
fragInfo.CachedFragment = Android.Support.V4.App.Fragment.Instantiate(this, FragmentJavaName(fragInfo.FragmentType),
bundle) as IMvxFragmentView;
OnFragmentCreated(fragInfo, ft);
}
currentFragment = fragInfo.CachedFragment as Android.Support.V4.App.Fragment;
ft.Replace(fragInfo.ContentId, fragInfo.CachedFragment as Android.Support.V4.App.Fragment, fragInfo.Tag);
//if replacing ViewModel then clear the cache after the fragment
//has been added to the transaction so that the Tag property is not null
//and the UniqueImmutableCacheTag property (if not overridden) has the correct value
if (fragmentReplaceMode == FragmentReplaceMode.ReplaceFragmentAndViewModel)
{
var cache = Mvx.GetSingleton<IMvxMultipleViewModelCache>();
cache.GetAndClear(fragInfo.ViewModelType, GetTagFromFragment(fragInfo.CachedFragment as Android.Support.V4.App.Fragment));
}
if ((currentFragment != null && fragInfo.AddToBackStack) || forceAddToBackStack)
{
ft.AddToBackStack(fragInfo.Tag);
}
OnFragmentChanging(fragInfo, ft);
ft.Commit();
SupportFragmentManager.ExecutePendingTransactions();
OnFragmentChanged(fragInfo);
}
protected virtual FragmentReplaceMode ShouldReplaceCurrentFragment(IMvxCachedFragmentInfo newFragment, IMvxCachedFragmentInfo currentFragment, Bundle replacementBundle)
{
var oldBundle = ((Android.Support.V4.App.Fragment)newFragment.CachedFragment)?.Arguments;
if (oldBundle == null) return FragmentReplaceMode.ReplaceFragment;
var serializer = Mvx.Resolve<IMvxNavigationSerializer>();
var json = oldBundle.GetString(MvxFragmentsPresenter.ViewModelRequestBundleKey);
var oldRequest = serializer.Serializer.DeserializeObject<MvxViewModelRequest>(json);
if (oldRequest == null) return FragmentReplaceMode.ReplaceFragment;
json = replacementBundle.GetString(MvxFragmentsPresenter.ViewModelRequestBundleKey);
var replacementRequest = serializer.Serializer.DeserializeObject<MvxViewModelRequest>(json);
if (replacementRequest == null) return FragmentReplaceMode.ReplaceFragment;
var areParametersEqual = ((oldRequest.ParameterValues == replacementRequest.ParameterValues) ||
(oldRequest.ParameterValues.Count == replacementRequest.ParameterValues.Count &&
!oldRequest.ParameterValues.Except(replacementRequest.ParameterValues).Any()));
if (currentFragment?.Tag != newFragment.Tag)
{
return !areParametersEqual
? FragmentReplaceMode.ReplaceFragmentAndViewModel
: FragmentReplaceMode.ReplaceFragment;
}
else
return !areParametersEqual
? FragmentReplaceMode.ReplaceFragmentAndViewModel
: FragmentReplaceMode.NoReplace;
}
public override void OnBackPressed()
{
if (SupportFragmentManager.BackStackEntryCount >= 1)
{
SupportFragmentManager.PopBackStackImmediate();
if (FragmentCacheConfiguration.EnableOnFragmentPoppedCallback)
{
//NOTE(vvolkgang) this is returning ALL the frags. Should we return only the visible ones?
var currentFragsInfo = GetCurrentCacheableFragmentsInfo();
OnFragmentPopped(currentFragsInfo);
}
return;
}
base.OnBackPressed();
}
protected virtual List<IMvxCachedFragmentInfo> GetCurrentCacheableFragmentsInfo()
{
return GetCurrentCacheableFragments()
.Select(frag => GetFragmentInfoByTag(GetTagFromFragment(frag)))
.ToList();
}
protected virtual IEnumerable<Android.Support.V4.App.Fragment> GetCurrentCacheableFragments()
{
var currentFragments = SupportFragmentManager.Fragments ?? Enumerable.Empty<Android.Support.V4.App.Fragment>();
return currentFragments
.Where(fragment => fragment != null)
// we are not interested in fragments which are not supposed to cache!
.Where(fragment => fragment.GetType().IsFragmentCacheable(GetType()));
}
protected virtual IMvxCachedFragmentInfo GetLastFragmentInfo()
{
var currentCacheableFragments = GetCurrentCacheableFragments().ToList();
if (!currentCacheableFragments.Any())
throw new InvalidOperationException("Cannot retrieve last fragment as FragmentManager is empty.");
var lastFragment = currentCacheableFragments.Last();
var tagFragment = GetTagFromFragment(lastFragment);
return GetFragmentInfoByTag(tagFragment);
}
protected virtual string GetTagFromFragment(Android.Support.V4.App.Fragment fragment)
{
var mvxFragmentView = fragment as IMvxFragmentView;
// ReSharper disable once PossibleNullReferenceException
// Fragment can never be null because registered fragment has to inherit from IMvxFragmentView
return mvxFragmentView.UniqueImmutableCacheTag;
}
/// <summary>
/// Close Fragment with a specific tag at a specific placeholder
/// </summary>
/// <param name="tag">The tag for the fragment to lookup</param>
/// <param name="contentId">Where you want to close the Fragment</param>
protected virtual void CloseFragment(string tag, int contentId)
{
var frag = SupportFragmentManager.FindFragmentById(contentId);
if (frag == null) return;
SupportFragmentManager.PopBackStackImmediate(tag, 1);
}
protected virtual string FragmentJavaName(Type fragmentType)
{
return Java.Lang.Class.FromType(fragmentType).Name;
}
public virtual void OnBeforeFragmentChanging(IMvxCachedFragmentInfo fragmentInfo, FragmentTransaction transaction)
{
}
// Called before the transaction is commited
public virtual void OnFragmentChanging(IMvxCachedFragmentInfo fragmentInfo, FragmentTransaction transaction) { }
public virtual void OnFragmentChanged(IMvxCachedFragmentInfo fragmentInfo)
{
}
public virtual void OnFragmentPopped(IList<IMvxCachedFragmentInfo> currentFragmentsInfo)
{
}
public virtual void OnFragmentCreated(IMvxCachedFragmentInfo fragmentInfo, FragmentTransaction transaction)
{
}
protected IMvxCachedFragmentInfo GetFragmentInfoByTag(string tag)
{
IMvxCachedFragmentInfo fragInfo;
FragmentCacheConfiguration.TryGetValue(tag, out fragInfo);
if (fragInfo == null)
throw new MvxException("Could not find tag: {0} in cache, you need to register it first.", tag);
return fragInfo;
}
public IFragmentCacheConfiguration FragmentCacheConfiguration => _fragmentCacheConfiguration ?? (_fragmentCacheConfiguration = BuildFragmentCacheConfiguration());
public virtual IFragmentCacheConfiguration BuildFragmentCacheConfiguration()
{
return new DefaultFragmentCacheConfiguration();
}
protected virtual string GetFragmentTag(MvxViewModelRequest request, Bundle bundle, Type fragmentType)
{
// THAT won't work properly if you have multiple instance of same fragment type in same FragmentHost.
// Override that in such cases
return request.ViewModelType.FullName;
}
public virtual bool Show(MvxViewModelRequest request, Bundle bundle, Type fragmentType, MvxFragmentAttribute fragmentAttribute)
{
var fragmentTag = GetFragmentTag(request, bundle, fragmentType);
FragmentCacheConfiguration.RegisterFragmentToCache(fragmentTag, fragmentType, request.ViewModelType, fragmentAttribute.AddToBackStack);
ShowFragment(fragmentTag, fragmentAttribute.FragmentContentId, bundle);
return true;
}
public virtual bool Close(IMvxViewModel viewModel)
{
//Workaround for closing fragments. This will not work when showing multiple fragments of the same viewmodel type in one activity
var frag = GetCurrentCacheableFragmentsInfo ().FirstOrDefault (x => x.ViewModelType == viewModel.GetType());
if (frag == null)
{
return false;
}
// Close method can not be fully fixed at this moment. That requires some changes in main MvvmCross library
CloseFragment(frag.Tag, frag.ContentId);
return true;
}
public override void OnAttachedToWindow()
{
base.OnAttachedToWindow();
ViewModel?.Appearing();
}
public override void OnDetachedFromWindow()
{
base.OnDetachedFromWindow();
ViewModel?.Disappearing(); // we don't have anywhere to get this info
ViewModel?.Disappeared();
}
}
public abstract class MvxCachingFragmentActivity<TViewModel>
: MvxCachingFragmentActivity
, IMvxAndroidView<TViewModel> where TViewModel : class, IMvxViewModel
{
public new TViewModel ViewModel
{
get { return (TViewModel)base.ViewModel; }
set { base.ViewModel = value; }
}
}
}
| 1 | 12,709 | Throw `fragInfo.CachedFragment.ViewModel.GetType()` into a variable so it's not evaluated each time. | MvvmCross-MvvmCross | .cs |
@@ -189,7 +189,7 @@ class User < ActiveRecord::Base
end
def preferred_languages
- @preferred_languages ||= Locale.list(languages)
+ Locale.list(languages)
end
def nearby(radius = NEARBY_RADIUS, num = NEARBY_USERS) | 1 | # == Schema Information
#
# Table name: users
#
# email :string not null
# id :integer not null, primary key
# pass_crypt :string not null
# creation_time :datetime not null
# display_name :string default(""), not null
# data_public :boolean default(FALSE), not null
# description :text default(""), not null
# home_lat :float
# home_lon :float
# home_zoom :integer default(3)
# nearby :integer default(50)
# pass_salt :string
# image_file_name :text
# email_valid :boolean default(FALSE), not null
# new_email :string
# creation_ip :string
# languages :string
# status :enum default("pending"), not null
# terms_agreed :datetime
# consider_pd :boolean default(FALSE), not null
# auth_uid :string
# preferred_editor :string
# terms_seen :boolean default(FALSE), not null
# description_format :enum default("markdown"), not null
# image_fingerprint :string
# changesets_count :integer default(0), not null
# traces_count :integer default(0), not null
# diary_entries_count :integer default(0), not null
# image_use_gravatar :boolean default(FALSE), not null
# image_content_type :string
# auth_provider :string
# home_tile :integer
#
# Indexes
#
# users_auth_idx (auth_provider,auth_uid) UNIQUE
# users_display_name_idx (display_name) UNIQUE
# users_display_name_lower_idx (lower((display_name)::text))
# users_email_idx (email) UNIQUE
# users_email_lower_idx (lower((email)::text))
# users_home_idx (home_tile)
#
class User < ActiveRecord::Base
require "xml/libxml"
has_many :traces, -> { where(:visible => true) }
has_many :diary_entries, -> { order(:created_at => :desc) }
has_many :diary_comments, -> { order(:created_at => :desc) }
has_many :diary_entry_subscriptions, :class_name => "DiaryEntrySubscription"
has_many :diary_subscriptions, :through => :diary_entry_subscriptions, :source => :diary_entry
has_many :messages, -> { where(:to_user_visible => true).order(:sent_on => :desc).preload(:sender, :recipient) }, :foreign_key => :to_user_id
has_many :new_messages, -> { where(:to_user_visible => true, :message_read => false).order(:sent_on => :desc) }, :class_name => "Message", :foreign_key => :to_user_id
has_many :sent_messages, -> { where(:from_user_visible => true).order(:sent_on => :desc).preload(:sender, :recipient) }, :class_name => "Message", :foreign_key => :from_user_id
has_many :friends, -> { joins(:befriendee).where(:users => { :status => %w[active confirmed] }) }
has_many :friend_users, :through => :friends, :source => :befriendee
has_many :tokens, :class_name => "UserToken"
has_many :preferences, :class_name => "UserPreference"
has_many :changesets, -> { order(:created_at => :desc) }
has_many :changeset_comments, :foreign_key => :author_id
has_and_belongs_to_many :changeset_subscriptions, :class_name => "Changeset", :join_table => "changesets_subscribers", :foreign_key => "subscriber_id"
has_many :note_comments, :foreign_key => :author_id
has_many :notes, :through => :note_comments
has_many :client_applications
has_many :oauth_tokens, -> { order(:authorized_at => :desc).preload(:client_application) }, :class_name => "OauthToken"
has_many :blocks, :class_name => "UserBlock"
has_many :blocks_created, :class_name => "UserBlock", :foreign_key => :creator_id
has_many :blocks_revoked, :class_name => "UserBlock", :foreign_key => :revoker_id
has_many :roles, :class_name => "UserRole"
scope :visible, -> { where(:status => %w[pending active confirmed]) }
scope :active, -> { where(:status => %w[active confirmed]) }
scope :identifiable, -> { where(:data_public => true) }
has_attached_file :image,
:default_url => "/assets/:class/:attachment/:style.png",
:styles => { :large => "100x100>", :small => "50x50>" }
validates :display_name, :presence => true, :allow_nil => true, :length => 3..255,
:exclusion => %w[new terms save confirm confirm-email go_public reset-password forgot-password suspended]
validates :display_name, :if => proc { |u| u.display_name_changed? },
:uniqueness => { :case_sensitive => false }
validates :display_name, :if => proc { |u| u.display_name_changed? },
:format => { :with => %r{\A[^\x00-\x1f\x7f\ufffe\uffff/;.,?%#]*\z} }
validates :display_name, :if => proc { |u| u.display_name_changed? },
:format => { :with => /\A\S/, :message => "has leading whitespace" }
validates :display_name, :if => proc { |u| u.display_name_changed? },
:format => { :with => /\S\z/, :message => "has trailing whitespace" }
validates :email, :presence => true, :confirmation => true
validates :email, :if => proc { |u| u.email_changed? },
:uniqueness => { :case_sensitive => false }
validates :pass_crypt, :confirmation => true, :length => 8..255
validates :home_lat, :allow_nil => true, :numericality => true, :inclusion => { :in => -90..90 }
validates :home_lon, :allow_nil => true, :numericality => true, :inclusion => { :in => -180..180 }
validates :home_zoom, :allow_nil => true, :numericality => { :only_integer => true }
validates :preferred_editor, :inclusion => Editors::ALL_EDITORS, :allow_nil => true
validates :image, :attachment_content_type => { :content_type => %r{\Aimage/.*\Z} }
validates :auth_uid, :unless => proc { |u| u.auth_provider.nil? },
:uniqueness => { :scope => :auth_provider }
validates_email_format_of :email, :if => proc { |u| u.email_changed? }
validates_email_format_of :new_email, :allow_blank => true, :if => proc { |u| u.new_email_changed? }
after_initialize :set_defaults
before_save :encrypt_password
before_save :update_tile
after_save :spam_check
def to_param
display_name
end
def self.authenticate(options)
if options[:username] && options[:password]
user = find_by("email = ? OR display_name = ?", options[:username], options[:username])
if user.nil?
users = where("LOWER(email) = LOWER(?) OR LOWER(display_name) = LOWER(?)", options[:username], options[:username])
user = users.first if users.count == 1
end
if user && PasswordHash.check(user.pass_crypt, user.pass_salt, options[:password])
if PasswordHash.upgrade?(user.pass_crypt, user.pass_salt)
user.pass_crypt, user.pass_salt = PasswordHash.create(options[:password])
user.save
end
else
user = nil
end
elsif options[:token]
token = UserToken.find_by(:token => options[:token])
user = token.user if token
end
if user &&
(user.status == "deleted" ||
(user.status == "pending" && !options[:pending]) ||
(user.status == "suspended" && !options[:suspended]))
user = nil
end
token.update(:expiry => 1.week.from_now) if token && user
user
end
def to_xml
doc = OSM::API.new.get_xml_doc
doc.root << to_xml_node
doc
end
def to_xml_node
el1 = XML::Node.new "user"
el1["display_name"] = display_name.to_s
el1["account_created"] = creation_time.xmlschema
if home_lat && home_lon
home = XML::Node.new "home"
home["lat"] = home_lat.to_s
home["lon"] = home_lon.to_s
home["zoom"] = home_zoom.to_s
el1 << home
end
el1
end
def description
RichText.new(self[:description_format], self[:description])
end
def languages
attribute_present?(:languages) ? self[:languages].split(/ *[, ] */) : []
end
def languages=(languages)
self[:languages] = languages.join(",")
end
def preferred_language
languages.find { |l| Language.exists?(:code => l) }
end
def preferred_languages
@preferred_languages ||= Locale.list(languages)
end
def nearby(radius = NEARBY_RADIUS, num = NEARBY_USERS)
if home_lon && home_lat
gc = OSM::GreatCircle.new(home_lat, home_lon)
sql_for_area = QuadTile.sql_for_area(gc.bounds(radius), "home_")
sql_for_distance = gc.sql_for_distance("home_lat", "home_lon")
nearby = User.active.identifiable
.where("id != ?", id)
.where(sql_for_area)
.where("#{sql_for_distance} <= ?", radius)
.order(sql_for_distance)
.limit(num)
else
nearby = []
end
nearby
end
def distance(nearby_user)
OSM::GreatCircle.new(home_lat, home_lon).distance(nearby_user.home_lat, nearby_user.home_lon)
end
def is_friends_with?(new_friend)
friends.where(:friend_user_id => new_friend.id).exists?
end
##
# returns true if a user is visible
def visible?
%w[pending active confirmed].include? status
end
##
# returns true if a user is active
def active?
%w[active confirmed].include? status
end
##
# returns true if the user has the moderator role, false otherwise
def moderator?
has_role? "moderator"
end
##
# returns true if the user has the administrator role, false otherwise
def administrator?
has_role? "administrator"
end
##
# returns true if the user has the requested role
def has_role?(role)
roles.any? { |r| r.role == role }
end
##
# returns the first active block which would require users to view
# a message, or nil if there are none.
def blocked_on_view
blocks.active.detect(&:needs_view?)
end
##
# delete a user - leave the account but purge most personal data
def delete
self.display_name = "user_#{id}"
self.description = ""
self.home_lat = nil
self.home_lon = nil
self.image = nil
self.email_valid = false
self.new_email = nil
self.auth_provider = nil
self.auth_uid = nil
self.status = "deleted"
save
end
##
# return a spam score for a user
def spam_score
changeset_score = changesets.size * 50
trace_score = traces.size * 50
diary_entry_score = diary_entries.visible.inject(0) { |acc, elem| acc + elem.body.spam_score }
diary_comment_score = diary_comments.visible.inject(0) { |acc, elem| acc + elem.body.spam_score }
score = description.spam_score / 4.0
score += diary_entries.where("created_at > ?", 1.day.ago).count * 10
score += diary_entry_score / diary_entries.length unless diary_entries.empty?
score += diary_comment_score / diary_comments.length unless diary_comments.empty?
score -= changeset_score
score -= trace_score
score.to_i
end
##
# perform a spam check on a user
def spam_check
update(:status => "suspended") if status == "active" && spam_score > SPAM_THRESHOLD
end
##
# return an oauth access token for a specified application
def access_token(application_key)
ClientApplication.find_by(:key => application_key).access_token_for_user(self)
end
private
def set_defaults
self.creation_time = Time.now.getutc unless attribute_present?(:creation_time)
end
def encrypt_password
if pass_crypt_confirmation
self.pass_crypt, self.pass_salt = PasswordHash.create(pass_crypt)
self.pass_crypt_confirmation = nil
end
end
def update_tile
self.home_tile = QuadTile.tile_for_point(home_lat, home_lon) if home_lat && home_lon
end
end
| 1 | 11,303 | Rather than just getting rid of the memoization maybe we should use an `after_save` filter to set it to `nil` so that it will be recomputed on next read? | openstreetmap-openstreetmap-website | rb |
@@ -0,0 +1,3 @@
+export default function remove(el) {
+ el.parentNode && el.parentNode.removeChild(el);
+} | 1 | 1 | 10,731 | JS abstraction leaks so this should be `if (el.parentNode !== null) { el.parentNode.removeChild(el) }` @developit | preactjs-preact | js |
|
@@ -30,6 +30,15 @@ type LedgerUSB struct {
hiddev *hid.Device
}
+// LedgerUSBError is a wrapper around the two-byte error code that the Ledger
+// protocol returns.
+type LedgerUSBError uint16
+
+// Error satisfies builtin interface `error`
+func (err LedgerUSBError) Error() string {
+ return fmt.Sprintf("unexpected status %x", err)
+}
+
// Protocol reference:
// https://github.com/LedgerHQ/blue-loader-python/blob/master/ledgerblue/comm.py (see HIDDongleHIDAPI)
// https://github.com/LedgerHQ/blue-loader-python/blob/master/ledgerblue/ledgerWrapper.py (see wrapCommandAPDU) | 1 | // Copyright (C) 2019 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package driver
import (
"encoding/binary"
"fmt"
"github.com/karalabe/hid"
)
// LedgerUSB is a wrapper around a Ledger USB HID device, used to implement
// the protocol used for sending messages to the application running on the
// Ledger hardware wallet.
type LedgerUSB struct {
hiddev *hid.Device
}
// Protocol reference:
// https://github.com/LedgerHQ/blue-loader-python/blob/master/ledgerblue/comm.py (see HIDDongleHIDAPI)
// https://github.com/LedgerHQ/blue-loader-python/blob/master/ledgerblue/ledgerWrapper.py (see wrapCommandAPDU)
// WritePackets sends a message to the Ledger device, by breaking it up
// into multiple packets as needed.
func (l *LedgerUSB) WritePackets(msg []byte) error {
first := true
sequenceIdx := 0
offset := 0
if len(msg) >= 1<<16 {
return fmt.Errorf("WritePackets: message too long (%d)", len(msg))
}
for {
var packet [64]byte
cur := packet[:]
binary.BigEndian.PutUint16(cur, 0x0101)
cur = cur[2:]
cur[0] = 0x05
cur = cur[1:]
binary.BigEndian.PutUint16(cur, uint16(sequenceIdx))
cur = cur[2:]
if first {
binary.BigEndian.PutUint16(cur, uint16(len(msg)))
cur = cur[2:]
first = false
}
copied := copy(cur, msg[offset:])
cc, err := l.hiddev.Write(packet[:])
if err != nil {
return err
}
if cc != len(packet) {
return fmt.Errorf("WritePackets: short write: %d != %d", cc, len(packet))
}
sequenceIdx++
offset += copied
if offset >= len(msg) {
// Nothing more to send
break
}
}
return nil
}
// ReadPackets reads a message from the Ledger device, assembling multiple
// packets as needed.
func (l *LedgerUSB) ReadPackets() ([]byte, error) {
first := true
sequenceIdx := 0
var dataleft uint16
var msg []byte
for {
var packet [64]byte
cc, err := l.hiddev.Read(packet[:])
if err != nil {
return nil, err
}
if cc != len(packet) {
return nil, fmt.Errorf("ReadPackets: short read: %d != %d", cc, len(packet))
}
cur := packet[:]
channel := binary.BigEndian.Uint16(cur)
cur = cur[2:]
if channel != 0x0101 {
return nil, fmt.Errorf("ReadPackets: wrong channel %x", channel)
}
if cur[0] != 0x05 {
return nil, fmt.Errorf("ReadPackets: wrong tag %x", cur[0])
}
cur = cur[1:]
seq := binary.BigEndian.Uint16(cur)
cur = cur[2:]
if seq != uint16(sequenceIdx) {
return nil, fmt.Errorf("ReadPackets: wrong seq %d", seq)
}
if first {
dataleft = binary.BigEndian.Uint16(cur)
cur = cur[2:]
first = false
}
if dataleft < uint16(len(cur)) {
msg = append(msg, cur[:dataleft]...)
dataleft = 0
} else {
msg = append(msg, cur...)
dataleft -= uint16(len(cur))
}
sequenceIdx++
if dataleft == 0 {
// Nothing more to receive
break
}
}
return msg, nil
}
// Exchange sends a message to the Ledger device, waits for a response,
// and returns the response data.
func (l *LedgerUSB) Exchange(msg []byte) ([]byte, error) {
err := l.WritePackets(msg)
if err != nil {
return nil, err
}
reply, err := l.ReadPackets()
if err != nil {
return nil, err
}
if len(reply) < 2 {
return nil, fmt.Errorf("Exchange: reply too short: %d < 2", len(reply))
}
replyMsg := reply[:len(reply)-2]
replyStat := binary.BigEndian.Uint16(reply[len(reply)-2:])
replyStatHi := replyStat & 0xff00
if replyStat != 0x9000 && replyStatHi != 0x6100 && replyStatHi != 0x6c00 {
// See various hints about what the error status might mean in
// HIDDongleHIDAPI.exchange():
// https://github.com/LedgerHQ/blue-loader-python/blob/master/ledgerblue/comm.py
return nil, fmt.Errorf("Exchange: unexpected status %x", replyStat)
}
return replyMsg, nil
}
// USBInfo returns information about the underlying USB device.
func (l *LedgerUSB) USBInfo() hid.DeviceInfo {
return l.hiddev.DeviceInfo
}
// LedgerEnumerate returns all of the Ledger devices connected to this machine.
func LedgerEnumerate() ([]LedgerUSB, error) {
if !hid.Supported() {
return nil, fmt.Errorf("HID not supported")
}
var devs []LedgerUSB
for _, info := range hid.Enumerate(0x2c97, 0x0001) {
dev, err := info.Open()
if err != nil {
return nil, err
}
devs = append(devs, LedgerUSB{
hiddev: dev,
})
}
return devs, nil
}
| 1 | 36,590 | nit: change it to `Exchange: unexpected status 0x%x` so that we'll know where this issue came from as well as making it easier to decode. | algorand-go-algorand | go |
@@ -23,13 +23,7 @@ func CstorVolumeArtifactsFor070() (list ArtifactList) {
return
}
-// cstorVolumeYamlsFor070 returns all the yamls related to cstor volume in a
-// string format
-//
-// NOTE:
-// This is an implementation of MultiYamlFetcher
-func cstorVolumeYamlsFor070() string {
- return `
+const cstorRunTask = `
---
apiVersion: openebs.io/v1alpha1
kind: CASTemplate | 1 | /*
Copyright 2018 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
// CstorVolumeArtifactsFor070 returns the cstor volume related artifacts
// corresponding to version 0.7.0
func CstorVolumeArtifactsFor070() (list ArtifactList) {
list.Items = append(list.Items, ParseArtifactListFromMultipleYamls(cstorVolumeYamlsFor070)...)
return
}
// cstorVolumeYamlsFor070 returns all the yamls related to cstor volume in a
// string format
//
// NOTE:
// This is an implementation of MultiYamlFetcher
func cstorVolumeYamlsFor070() string {
return `
---
apiVersion: openebs.io/v1alpha1
kind: CASTemplate
metadata:
name: cstor-volume-create-default-0.7.0
spec:
defaultConfig:
- name: VolumeControllerImage
value: {{env "OPENEBS_IO_CSTOR_VOLUME_MGMT_IMAGE" | default "openebs/cstor-volume-mgmt:latest"}}
- name: VolumeTargetImage
value: {{env "OPENEBS_IO_CSTOR_TARGET_IMAGE" | default "openebs/cstor-istgt:latest"}}
- name: VolumeMonitorImage
value: {{env "OPENEBS_IO_VOLUME_MONITOR_IMAGE" | default "openebs/m-exporter:latest"}}
- name: ReplicaCount
value: "3"
# TargetResourceRequests allow you to specify resource requests that need to be available
# before scheduling the containers. If not specified, the default is to use the limits
# from TargetResourceLimits or the default requests set in the cluster.
- name: TargetResourceRequests
value: "none"
# TargetResourceLimits allow you to set the limits on memory and cpu for target pods
# The resource and limit value should be in the same format as expected by
# Kubernetes. Example:
#- name: TargetResourceLimits
# value: |-
# memory: 1Gi
# cpu: 200m
# By default, the resource limits are disabled.
- name: TargetResourceLimits
value: "none"
# AuxResourceLimits allow you to set limits on side cars. Limits have to be specified
# in the format expected by Kubernetes
- name: AuxResourceLimits
value: "none"
- name: RunNamespace
value: {{env "OPENEBS_NAMESPACE"}}
# ServiceAccountName is the account name assigned to volume management pod
# with permissions to view, create, edit, delete required custom resources
- name: ServiceAccountName
value: {{env "OPENEBS_SERVICE_ACCOUNT"}}
# FSType specifies the format type that Kubernetes should use to
# mount the Persistent Volume. Note that there are no validations
# done to check the validity of the FsType
- name: FSType
value: "ext4"
# Lun specifies the lun number with which Kubernetes should login
# to iSCSI Volume (i.e OpenEBS Persistent Volume)
- name: Lun
value: "0"
taskNamespace: {{env "OPENEBS_NAMESPACE"}}
run:
tasks:
- cstor-volume-create-listcstorpoolcr-default-0.7.0
- cstor-volume-create-puttargetservice-default-0.7.0
- cstor-volume-create-putcstorvolumecr-default-0.7.0
- cstor-volume-create-puttargetdeployment-default-0.7.0
- cstor-volume-create-putcstorvolumereplicacr-default-0.7.0
output: cstor-volume-create-output-default-0.7.0
---
apiVersion: openebs.io/v1alpha1
kind: CASTemplate
metadata:
name: cstor-volume-delete-default-0.7.0
spec:
defaultConfig:
- name: RunNamespace
value: {{env "OPENEBS_NAMESPACE"}}
taskNamespace: {{env "OPENEBS_NAMESPACE"}}
run:
tasks:
- cstor-volume-delete-listcstorvolumecr-default-0.7.0
- cstor-volume-delete-listtargetservice-default-0.7.0
- cstor-volume-delete-listtargetdeployment-default-0.7.0
- cstor-volume-delete-listcstorvolumereplicacr-default-0.7.0
- cstor-volume-delete-deletetargetservice-default-0.7.0
- cstor-volume-delete-deletetargetdeployment-default-0.7.0
- cstor-volume-delete-deletecstorvolumereplicacr-default-0.7.0
- cstor-volume-delete-deletecstorvolumecr-default-0.7.0
output: cstor-volume-delete-output-default-0.7.0
---
apiVersion: openebs.io/v1alpha1
kind: CASTemplate
metadata:
name: cstor-volume-read-default-0.7.0
spec:
defaultConfig:
- name: RunNamespace
value: {{env "OPENEBS_NAMESPACE"}}
taskNamespace: {{env "OPENEBS_NAMESPACE"}}
run:
tasks:
- cstor-volume-read-listtargetservice-default-0.7.0
- cstor-volume-read-listcstorvolumecr-default-0.7.0
- cstor-volume-read-listcstorvolumereplicacr-default-0.7.0
- cstor-volume-read-listtargetpod-default-0.7.0
output: cstor-volume-read-output-default-0.7.0
---
apiVersion: openebs.io/v1alpha1
kind: CASTemplate
metadata:
name: cstor-volume-list-default-0.7.0
spec:
defaultConfig:
- name: RunNamespace
value: {{env "OPENEBS_NAMESPACE"}}
taskNamespace: {{env "OPENEBS_NAMESPACE"}}
run:
tasks:
- cstor-volume-list-listtargetservice-default-0.7.0
- cstor-volume-list-listtargetpod-default-0.7.0
- cstor-volume-list-listcstorvolumereplicacr-default-0.7.0
output: cstor-volume-list-output-default-0.7.0
---
# runTask to list cstor pools
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-create-listcstorpoolcr-default-0.7.0
spec:
meta: |
id: cvolcreatelistpool
runNamespace: {{.Config.RunNamespace.value}}
apiVersion: openebs.io/v1alpha1
kind: CStorPool
action: list
options: |-
labelSelector: openebs.io/storage-pool-claim={{ .Config.StoragePoolClaim.value }}
post: |
{{/*
Check if enough online pools are present to create replicas.
If pools are not present error out.
Save the cstorpool's uid:name into .ListItems.cvolPoolList otherwise
*/}}
{{- $replicaCount := int64 .Config.ReplicaCount.value | saveAs "rc" .ListItems -}}
{{- $poolsList := jsonpath .JsonResult "{range .items[?(@.status.phase=='Online')]}pkey=pools,{@.metadata.uid}={@.metadata.name};{end}" | trim | default "" | splitListTrim ";" -}}
{{- $poolsList | saveAs "pl" .ListItems -}}
{{- len $poolsList | gt $replicaCount | verifyErr "not enough pools available to create replicas" | saveAs "cvolcreatelistpool.verifyErr" .TaskResult | noop -}}
{{- $poolsList | keyMap "cvolPoolList" .ListItems | noop -}}
{{- $poolsNodeList := jsonpath .JsonResult "{range .items[?(@.status.phase=='Online')]}pkey=pools,{@.metadata.uid}={@.metadata.labels.kubernetes\\.io/hostname};{end}" | trim | default "" | splitList ";" -}}
{{- $poolsNodeList | keyMap "cvolPoolNodeList" .ListItems | noop -}}
---
# runTask to create cStor target service
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-create-puttargetservice-default-0.7.0
spec:
meta: |
apiVersion: v1
kind: Service
action: put
id: cvolcreateputsvc
runNamespace: {{.Config.RunNamespace.value}}
post: |
{{- jsonpath .JsonResult "{.metadata.name}" | trim | saveAs "cvolcreateputsvc.objectName" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.spec.clusterIP}" | trim | saveAs "cvolcreateputsvc.clusterIP" .TaskResult | noop -}}
task: |
apiVersion: v1
kind: Service
metadata:
labels:
openebs.io/target-service: cstor-target-svc
openebs.io/storage-engine-type: cstor
openebs.io/cas-type: cstor
openebs.io/persistent-volume: {{ .Volume.owner }}
name: {{ .Volume.owner }}
spec:
ports:
- name: cstor-iscsi
port: 3260
protocol: TCP
targetPort: 3260
- name: mgmt
port: 6060
targetPort: 6060
protocol: TCP
selector:
app: cstor-volume-manager
openebs.io/target: cstor-target
openebs.io/persistent-volume: {{ .Volume.owner }}
---
# runTask to create cStorVolume
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-create-putcstorvolumecr-default-0.7.0
spec:
meta: |
apiVersion: openebs.io/v1alpha1
kind: CStorVolume
id: cvolcreateputvolume
runNamespace: {{.Config.RunNamespace.value}}
action: put
post: |
{{- jsonpath .JsonResult "{.metadata.uid}" | trim | saveAs "cvolcreateputvolume.cstorid" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.metadata.name}" | trim | saveAs "cvolcreateputvolume.objectName" .TaskResult | noop -}}
task: |
{{- $replicaCount := .Config.ReplicaCount.value | int64 -}}
apiVersion: openebs.io/v1alpha1
kind: CStorVolume
metadata:
name: {{ .Volume.owner }}
annotations:
openebs.io/fs-type: {{ .Config.FSType.value }}
openebs.io/lun: {{ .Config.Lun.value }}
labels:
openebs.io/persistent-volume: {{ .Volume.owner }}
spec:
targetIP: {{ .TaskResult.cvolcreateputsvc.clusterIP }}
capacity: {{ .Volume.capacity }}
nodeBase: iqn.2016-09.com.openebs.cstor
iqn: iqn.2016-09.com.openebs.cstor:{{ .Volume.owner }}
targetPortal: {{ .TaskResult.cvolcreateputsvc.clusterIP }}:3260
targetPort: 3260
status: ""
replicationFactor: {{ $replicaCount }}
consistencyFactor: {{ div $replicaCount 2 | floor | add1 }}
---
# runTask to create cStor target deployment
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-create-puttargetdeployment-default-0.7.0
spec:
meta: |
runNamespace: {{.Config.RunNamespace.value}}
apiVersion: apps/v1beta1
kind: Deployment
action: put
id: cvolcreateputctrl
post: |
{{- jsonpath .JsonResult "{.metadata.name}" | trim | saveAs "cvolcreateputctrl.objectName" .TaskResult | noop -}}
task: |
{{- $isMonitor := .Config.VolumeMonitor.enabled | default "true" | lower -}}
{{- $setResourceRequests := .Config.TargetResourceRequests.value | default "none" -}}
{{- $resourceRequestsVal := fromYaml .Config.TargetResourceRequests.value -}}
{{- $setResourceLimits := .Config.TargetResourceLimits.value | default "none" -}}
{{- $resourceLimitsVal := fromYaml .Config.TargetResourceLimits.value -}}
{{- $setAuxResourceLimits := .Config.AuxResourceLimits.value | default "none" -}}
{{- $auxResourceLimitsVal := fromYaml .Config.AuxResourceLimits.value -}}
apiVersion: apps/v1beta1
Kind: Deployment
metadata:
name: {{ .Volume.owner }}-target
labels:
app: cstor-volume-manager
openebs.io/storage-engine-type: cstor
openebs.io/cas-type: cstor
openebs.io/target: cstor-target
openebs.io/persistent-volume: {{ .Volume.owner }}
openebs.io/persistent-volume-claim: {{ .Volume.pvc }}
annotations:
{{- if eq $isMonitor "true" }}
openebs.io/volume-monitor: "true"
{{- end}}
openebs.io/volume-type: cstor
spec:
replicas: 1
selector:
matchLabels:
app: cstor-volume-manager
openebs.io/target: cstor-target
openebs.io/persistent-volume: {{ .Volume.owner }}
template:
metadata:
labels:
{{- if eq $isMonitor "true" }}
monitoring: volume_exporter_prometheus
{{- end}}
app: cstor-volume-manager
openebs.io/target: cstor-target
openebs.io/persistent-volume: {{ .Volume.owner }}
openebs.io/persistent-volume-claim: {{ .Volume.pvc }}
spec:
serviceAccountName: {{ .Config.ServiceAccountName.value }}
containers:
- image: {{ .Config.VolumeTargetImage.value }}
name: cstor-istgt
imagePullPolicy: IfNotPresent
resources:
{{- if ne $setResourceLimits "none" }}
limits:
{{- range $rKey, $rLimit := $resourceLimitsVal }}
{{ $rKey }}: {{ $rLimit }}
{{- end }}
{{- end }}
{{- if ne $setResourceRequests "none" }}
requests:
{{- range $rKey, $rReq := $resourceRequestsVal }}
{{ $rKey }}: {{ $rReq }}
{{- end }}
{{- end }}
ports:
- containerPort: 3260
protocol: TCP
securityContext:
privileged: true
volumeMounts:
- name: sockfile
mountPath: /var/run
- name: conf
mountPath: /usr/local/etc/istgt
- name: tmp
mountPath: /tmp
mountPropagation: Bidirectional
{{- if eq $isMonitor "true" }}
- image: {{ .Config.VolumeMonitorImage.value }}
name: maya-volume-exporter
{{- if ne $setAuxResourceLimits "none" }}
resources:
limits:
{{- range $rKey, $rLimit := $auxResourceLimitsVal }}
{{ $rKey }}: {{ $rLimit }}
{{- end }}
{{- end }}
args:
- "-e=cstor"
command: ["maya-exporter"]
ports:
- containerPort: 9500
protocol: TCP
volumeMounts:
- name: sockfile
mountPath: /var/run
- name: conf
mountPath: /usr/local/etc/istgt
{{- end}}
- name: cstor-volume-mgmt
image: {{ .Config.VolumeControllerImage.value }}
{{- if ne $setAuxResourceLimits "none" }}
resources:
limits:
{{- range $rKey, $rLimit := $auxResourceLimitsVal }}
{{ $rKey }}: {{ $rLimit }}
{{- end }}
{{- end }}
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
env:
- name: OPENEBS_IO_CSTOR_VOLUME_ID
value: {{ .TaskResult.cvolcreateputvolume.cstorid }}
securityContext:
privileged: true
volumeMounts:
- name: sockfile
mountPath: /var/run
- name: conf
mountPath: /usr/local/etc/istgt
- name: tmp
mountPath: /tmp
mountPropagation: Bidirectional
volumes:
- name: sockfile
emptyDir: {}
- name: conf
emptyDir: {}
- name: tmp
hostPath:
path: /var/openebs/shared-{{ .Volume.owner }}-target
type: DirectoryOrCreate
---
# runTask to create cStorVolumeReplica/(s)
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-create-putcstorvolumereplicacr-default-0.7.0
spec:
meta: |
apiVersion: openebs.io/v1alpha1
runNamespace: {{.Config.RunNamespace.value}}
kind: CStorVolumeReplica
action: put
id: cstorvolumecreatereplica
{{/*
Fetch all the cStorPool uids into a list.
Calculate the replica count
Add as many poolUid to resources as there is replica count
*/}}
{{- $poolUids := keys .ListItems.cvolPoolList.pools }}
{{- $replicaCount := .Config.ReplicaCount.value | int64 -}}
repeatWith:
resources:
{{- range $k, $v := $poolUids }}
{{- if lt $k $replicaCount }}
- {{ $v | quote }}
{{- end }}
{{- end }}
task: |
kind: CStorVolumeReplica
apiVersion: openebs.io/v1alpha1
metadata:
{{/*
We pluck the cStorPool name from the map[uid]name:
{ "uid1":"name1","uid2":"name2","uid2":"name2" }
The .ListItems.currentRepeatResource gives us the uid of one
of the pools from resources list
*/}}
name: {{ .Volume.owner }}-{{ pluck .ListItems.currentRepeatResource .ListItems.cvolPoolList.pools | first }}
labels:
cstorpool.openebs.io/name: {{ pluck .ListItems.currentRepeatResource .ListItems.cvolPoolList.pools | first }}
cstorpool.openebs.io/uid: {{ .ListItems.currentRepeatResource }}
cstorvolume.openebs.io/name: {{ .Volume.owner }}
openebs.io/persistent-volume: {{ .Volume.owner }}
annotations:
cstorpool.openebs.io/hostname: {{ pluck .ListItems.currentRepeatResource .ListItems.cvolPoolNodeList.pools | first }}
finalizers: ["cstorvolumereplica.openebs.io/finalizer"]
spec:
capacity: {{ .Volume.capacity }}
targetIP: {{ .TaskResult.cvolcreateputsvc.clusterIP }}
status:
# phase would be update by appropriate target
phase: ""
post: |
{{- jsonpath .JsonResult "{.metadata.name}" | trim | addTo "cstorvolumecreatereplica.objectName" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.metadata.spec.capacity}" | trim | saveAs "cstorvolumecreatereplica.capacity" .TaskResult | noop -}}
{{- $replicaPair := jsonpath .JsonResult "pkey=replicas,{@.metadata.name}={@.spec.capacity};" | trim | default "" | splitList ";" -}}
{{- $replicaPair | keyMap "replicaList" .ListItems | noop -}}
---
# runTask to render volume create output as CASVolume
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-create-output-default-0.7.0
spec:
meta: |
action: output
id: cstorvolumeoutput
kind: CASVolume
apiVersion: v1alpha1
task: |
kind: CASVolume
apiVersion: v1alpha1
metadata:
name: {{ .Volume.owner }}
spec:
capacity: {{ .Volume.capacity }}
iqn: iqn.2016-09.com.openebs.cstor:{{ .Volume.owner }}
targetPortal: {{ .TaskResult.cvolcreateputsvc.clusterIP }}:3260
targetIP: {{ .TaskResult.cvolcreateputsvc.clusterIP }}
targetPort: 3260
replicas: {{ .ListItems.replicaList.replicas | len }}
casType: cstor
---
# runTask to list all cstor target deployment services
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-list-listtargetservice-default-0.7.0
spec:
meta: |
{{- /*
Create and save list of namespaces to $nss.
Iterate over each namespace and perform list task
*/ -}}
{{- $nss := .Config.RunNamespace.value | default "" | splitList ", " -}}
id: listlistsvc
repeatWith:
metas:
{{- range $k, $ns := $nss }}
- runNamespace: {{ $ns }}
{{- end }}
apiVersion: v1
kind: Service
action: list
options: |-
labelSelector: openebs.io/target-service=cstor-target-svc
post: |
{{/*
We create a pair of "clusterIP"=xxxxx and save it for corresponding volume
The per volume is servicePair is identified by unique "namespace/vol-name" key
*/}}
{{- $servicePairs := jsonpath .JsonResult "{range .items[*]}pkey={@.metadata.labels.openebs\\.io/persistent-volume},clusterIP={@.spec.clusterIP};{end}" | trim | default "" | splitList ";" -}}
{{- $servicePairs | keyMap "volumeList" .ListItems | noop -}}
---
# runTask to list all cstor target pods
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-list-listtargetpod-default-0.7.0
spec:
meta: |
{{- $nss := .Config.RunNamespace.value | default "" | splitList ", " -}}
id: listlistctrl
repeatWith:
metas:
{{- range $k, $ns := $nss }}
- runNamespace: {{ $ns }}
{{- end }}
apiVersion: v1
kind: Pod
action: list
options: |-
labelSelector: openebs.io/target=cstor-target
post: |
{{/*
We create a pair of "targetIP"=xxxxx and save it for corresponding volume
The per volume is servicePair is identified by unique "namespace/vol-name" key
*/}}
{{- $targetPairs := jsonpath .JsonResult "{range .items[*]}pkey={@.metadata.labels.openebs\\.io/persistent-volume},targetIP={@.status.podIP},targetStatus={@.status.containerStatuses[*].ready};{end}" | trim | default "" | splitList ";" -}}
{{- $targetPairs | keyMap "volumeList" .ListItems | noop -}}
---
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-list-listcstorvolumereplicacr-default-0.7.0
spec:
meta: |
runNamespace: {{.Config.RunNamespace.value}}
id: listlistrep
apiVersion: openebs.io/v1alpha1
kind: CStorVolumeReplica
action: list
post: |
{{- $replicaPairs := jsonpath .JsonResult "{range .items[*]}pkey={@.metadata.labels.openebs\\.io/persistent-volume},replicaName={@.metadata.name},capacity={@.spec.capacity};{end}" | trim | default "" | splitList ";" -}}
{{- $replicaPairs | keyMap "volumeList" .ListItems | noop -}}
---
# runTask to render volume list output
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-list-output-default-0.7.0
spec:
meta: |
id : listoutput
action: output
kind: CASVolumeList
apiVersion: v1alpha1
task: |
kind: CASVolumeList
items:
{{/*
We have a unique key for each volume in .ListItems.volumeList
We iterate over it to extract various volume properties. These
properties were set in preceeding list tasks,
*/}}
{{- range $pkey, $map := .ListItems.volumeList }}
{{- $capacity := pluck "capacity" $map | first | default "" | splitList ", " | first }}
{{- $clusterIP := pluck "clusterIP" $map | first }}
{{- $targetStatus := pluck "targetStatus" $map | first }}
{{- $replicaName := pluck "replicaName" $map | first }}
{{- $name := $pkey }}
- kind: CASVolume
apiVersion: v1alpha1
metadata:
name: {{ $name }}
annotations:
openebs.io/cluster-ips: {{ $clusterIP }}
openebs.io/volume-size: {{ $capacity }}
openebs.io/controller-status: {{ $targetStatus | default "" | replace "true" "running" | replace "false" "notready" }}
spec:
capacity: {{ $capacity }}
iqn: iqn.2016-09.com.openebs.cstor:{{ $name }}
targetPortal: {{ $clusterIP }}:3260
targetIP: {{ $clusterIP }}
targetPort: 3260
replicas: {{ $replicaName | default "" | splitList ", " | len }}
casType: cstor
{{- end -}}
---
# runTask to list cStor target deployment service
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-read-listtargetservice-default-0.7.0
spec:
meta: |
runNamespace: {{.Config.RunNamespace.value}}
apiVersion: v1
id: readlistsvc
kind: Service
action: list
options: |-
labelSelector: openebs.io/target-service=cstor-target-svc,openebs.io/persistent-volume={{ .Volume.owner }}
post: |
{{- jsonpath .JsonResult "{.items[*].metadata.name}" | trim | saveAs "readlistsvc.items" .TaskResult | noop -}}
{{- .TaskResult.readlistsvc.items | notFoundErr "target service not found" | saveIf "readlistsvc.notFoundErr" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].spec.clusterIP}" | trim | saveAs "readlistsvc.clusterIP" .TaskResult | noop -}}
---
# runTask to list cstor volume cr
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-read-listcstorvolumecr-default-0.7.0
spec:
meta: |
id: readlistcv
runNamespace: {{.Config.RunNamespace.value}}
apiVersion: openebs.io/v1alpha1
kind: CStorVolume
action: list
options: |-
labelSelector: openebs.io/persistent-volume={{ .Volume.owner }}
post: |
{{- jsonpath .JsonResult "{.items[*].metadata.name}" | trim | saveAs "readlistcv.names" .TaskResult | noop -}}
{{- .TaskResult.readlistcv.names | notFoundErr "cStor Volume CR not found" | saveIf "readlistcv.notFoundErr" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].metadata.annotations.openebs\\.io/fs-type}" | trim | default "ext4" | saveAs "readlistcv.fsType" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].metadata.annotations.openebs\\.io/lun}" | trim | default "0" | int | saveAs "readlistcv.lun" .TaskResult | noop -}}
---
# runTask to list cStor volume target pods
apiVersion: openebs.io/v1alpha1
# runTask to list all replicas of a volume
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-read-listcstorvolumereplicacr-default-0.7.0
spec:
meta: |
id: readlistrep
runNamespace: {{.Config.RunNamespace.value}}
apiVersion: openebs.io/v1alpha1
kind: CStorVolumeReplica
action: list
options: |-
labelSelector: openebs.io/persistent-volume={{ .Volume.owner }}
post: |
{{- jsonpath .JsonResult "{.items[*].metadata.name}" | trim | saveAs "readlistrep.items" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].metadata.annotations.cstorpool\\.openebs\\.io/hostname}" | trim | saveAs "readlistrep.hostname" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].metadata.labels.cstorpool\\.openebs\\.io/name}" | trim | saveAs "readlistrep.poolname" .TaskResult | noop -}}
{{- .TaskResult.readlistrep.items | notFoundErr "replicas not found" | saveIf "readlistrep.notFoundErr" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].spec.capacity}" | trim | saveAs "readlistrep.capacity" .TaskResult | noop -}}
---
# runTask to list cStor volume target pods
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-read-listtargetpod-default-0.7.0
spec:
meta: |
runNamespace: {{.Config.RunNamespace.value}}
apiVersion: v1
kind: Pod
action: list
id: readlistctrl
options: |-
labelSelector: openebs.io/target=cstor-target,openebs.io/persistent-volume={{ .Volume.owner }}
post: |
{{- jsonpath .JsonResult "{.items[*].metadata.name}" | trim | saveAs "readlistctrl.items" .TaskResult | noop -}}
{{- .TaskResult.readlistctrl.items | notFoundErr "target pod not found" | saveIf "readlistctrl.notFoundErr" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].status.podIP}" | trim | saveAs "readlistctrl.podIP" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].status.containerStatuses[*].ready}" | trim | saveAs "readlistctrl.status" .TaskResult | noop -}}
---
# runTask to render output of read volume task as CAS Volume
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-read-output-default-0.7.0
spec:
meta: |
id : readoutput
action: output
kind: CASVolume
apiVersion: v1alpha1
task: |
{{/* We calculate capacity of the volume here. Pickup capacity from cvr */}}
{{- $capacity := .TaskResult.readlistrep.capacity | default "" | splitList " " | first -}}
kind: CASVolume
apiVersion: v1alpha1
metadata:
name: {{ .Volume.owner }}
{{/* Render other values into annotation */}}
annotations:
openebs.io/controller-ips: {{ .TaskResult.readlistctrl.podIP | default "" | splitList " " | first }}
openebs.io/controller-status: {{ .TaskResult.readlistctrl.status | default "" | splitList " " | join "," | replace "true" "running" | replace "false" "notready" }}
openebs.io/cvr-names: {{ .TaskResult.readlistrep.items | default "" | splitList " " | join "," }}
openebs.io/node-names: {{ .TaskResult.readlistrep.hostname | default "" | splitList " " | join "," }}
openebs.io/pool-names: {{ .TaskResult.readlistrep.poolname | default "" | splitList " " | join "," }}
spec:
capacity: {{ $capacity }}
iqn: iqn.2016-09.com.openebs.cstor:{{ .Volume.owner }}
targetPortal: {{ .TaskResult.readlistsvc.clusterIP }}:3260
targetIP: {{ .TaskResult.readlistsvc.clusterIP }}
targetPort: 3260
lun: {{ .TaskResult.readlistcv.lun }}
fsType: {{ .TaskResult.readlistcv.fsType }}
replicas: {{ .TaskResult.readlistrep.capacity | default "" | splitList " " | len }}
casType: cstor
---
# runTask to list the cstorvolume that has to be deleted
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-delete-listcstorvolumecr-default-0.7.0
spec:
meta: |
runNamespace: {{.Config.RunNamespace.value}}
id: deletelistcsv
apiVersion: openebs.io/v1alpha1
kind: CStorVolume
action: list
options: |-
labelSelector: openebs.io/persistent-volume={{ .Volume.owner }}
post: |
{{- jsonpath .JsonResult "{.items[*].metadata.name}" | trim | saveAs "deletelistcsv.names" .TaskResult | noop -}}
{{- .TaskResult.deletelistcsv.names | notFoundErr "cstor volume not found" | saveIf "deletelistcsv.notFoundErr" .TaskResult | noop -}}
{{- .TaskResult.deletelistcsv.names | default "" | splitList " " | isLen 1 | not | verifyErr "total no. cstor volume is not 1" | saveIf "deletelistcsv.verifyErr" .TaskResult | noop -}}
---
# runTask to list target service of volume to delete
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-delete-listtargetservice-default-0.7.0
spec:
meta: |
id: deletelistsvc
runNamespace: {{.Config.RunNamespace.value}}
apiVersion: v1
kind: Service
action: list
options: |-
labelSelector: openebs.io/target-service=cstor-target-svc,openebs.io/persistent-volume={{ .Volume.owner }}
post: |
{{/*
Save the name of the service. Error if service is missing or more
than one service exists
*/}}
{{- jsonpath .JsonResult "{.items[*].metadata.name}" | trim | saveAs "deletelistsvc.names" .TaskResult | noop -}}
{{- .TaskResult.deletelistsvc.names | notFoundErr "target service not found" | saveIf "deletelistsvc.notFoundErr" .TaskResult | noop -}}
{{- .TaskResult.deletelistsvc.names | default "" | splitList " " | isLen 1 | not | verifyErr "total no. of target services is not 1" | saveIf "deletelistsvc.verifyErr" .TaskResult | noop -}}
---
# runTask to list target deployment of volume to delete
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-delete-listtargetdeployment-default-0.7.0
spec:
meta: |
id: deletelistctrl
runNamespace: {{.Config.RunNamespace.value}}
apiVersion: apps/v1beta1
kind: Deployment
action: list
options: |-
labelSelector: openebs.io/target=cstor-target,openebs.io/persistent-volume={{ .Volume.owner }}
post: |
{{- jsonpath .JsonResult "{.items[*].metadata.name}" | trim | saveAs "deletelistctrl.names" .TaskResult | noop -}}
{{- .TaskResult.deletelistctrl.names | notFoundErr "target deployment not found" | saveIf "deletelistctrl.notFoundErr" .TaskResult | noop -}}
{{- .TaskResult.deletelistctrl.names | default "" | splitList " " | isLen 1 | not | verifyErr "total no. of target deployments is not 1" | saveIf "deletelistctrl.verifyErr" .TaskResult | noop -}}
---
# runTask to list cstorvolumereplica of volume to delete
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-delete-listcstorvolumereplicacr-default-0.7.0
spec:
meta: |
id: deletelistcvr
runNamespace: {{.Config.RunNamespace.value}}
apiVersion: openebs.io/v1alpha1
kind: CStorVolumeReplica
action: list
options: |-
labelSelector: openebs.io/persistent-volume={{ .Volume.owner }}
post: |
{{/*
List the names of the cstorvolumereplicas. Error if
cstorvolumereplica is missing, save to a map cvrlist otherwise
*/}}
{{- $cvrs := jsonpath .JsonResult "{range .items[*]}pkey=cvrs,{@.metadata.name}='';{end}" | trim | default "" | splitList ";" -}}
{{- $cvrs | notFoundErr "cstor volume replica not found" | saveIf "deletelistcvr.notFoundErr" .TaskResult | noop -}}
{{- $cvrs | keyMap "cvrlist" .ListItems | noop -}}
---
# runTask to delete cStor volume target service
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-delete-deletetargetservice-default-0.7.0
spec:
meta: |
id: deletedeletesvc
runNamespace: {{.Config.RunNamespace.value}}
apiVersion: v1
kind: Service
action: delete
objectName: {{ .TaskResult.deletelistsvc.names }}
---
# runTask to delete cStor volume target deployment
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-delete-deletetargetdeployment-default-0.7.0
spec:
meta: |
id: deletedeletectrl
runNamespace: {{.Config.RunNamespace.value}}
apiVersion: apps/v1beta1
kind: Deployment
action: delete
objectName: {{ .TaskResult.deletelistctrl.names }}
---
# runTask to delete cstorvolumereplica
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-delete-deletecstorvolumereplicacr-default-0.7.0
spec:
meta: |
runNamespace: {{.Config.RunNamespace.value}}
id: deletedeletecvr
action: delete
kind: CStorVolumeReplica
objectName: {{ keys .ListItems.cvrlist.cvrs | join "," }}
apiVersion: openebs.io/v1alpha1
---
# runTask to delete cstorvolume
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-delete-deletecstorvolumecr-default-0.7.0
spec:
meta: |
runNamespace: {{.Config.RunNamespace.value}}
id: deletedeletecsv
action: delete
apiVersion: openebs.io/v1alpha1
kind: CStorVolume
objectName: {{ pluck "names" .TaskResult.deletelistcsv | first }}
---
# runTask to render output of deleted volume.
# This task only returns the name of volume that is deleted
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-delete-output-default-0.7.0
spec:
meta: |
id: deleteoutput
action: output
kind: CASVolume
apiVersion: v1alpha1
task: |
kind: CASVolume
apiVersion: v1alpha1
metadata:
name: {{ .Volume.owner }}
---
`
}
| 1 | 9,654 | Name of constant is not appropriate. Name of constant should make use of version no. as suffix. The constant can be a typed string versus a string. | openebs-maya | go |
@@ -51,6 +51,7 @@ module Bolt
command_options = []
# Need to be interactive if redirecting STDIN
command_options << '--interactive' unless options[:stdin].nil?
+ command_options << '--tty' if options[:tty]
command_options.concat(envs) unless envs.empty?
command_options << container_id
command_options.concat(command) | 1 | # frozen_string_literal: true
require 'logging'
require 'bolt/node/errors'
module Bolt
module Transport
class Docker < Base
class Connection
def initialize(target)
raise Bolt::ValidationError, "Target #{target.name} does not have a host" unless target.host
@target = target
@logger = Logging.logger[target.host]
@docker_host = @target.options['service-url']
end
def connect
# We don't actually have a connection, but we do need to
# check that the container exists and is running.
output = execute_local_docker_json_command('ps')
index = output.find_index { |item| item["ID"] == @target.host || item["Names"] == @target.host }
raise "Could not find a container with name or ID matching '#{@target.host}'" if index.nil?
# Now find the indepth container information
output = execute_local_docker_json_command('inspect', [output[index]["ID"]])
# Store the container information for later
@container_info = output[0]
@logger.debug { "Opened session" }
true
rescue StandardError => e
raise Bolt::Node::ConnectError.new(
"Failed to connect to #{@target.uri}: #{e.message}",
'CONNECT_ERROR'
)
end
# Executes a command inside the target container
#
# @param command [Array] The command to run, expressed as an array of strings
# @param options [Hash] command specific options
# @option opts [String] :interpreter statements that are prefixed to the command e.g `/bin/bash` or `cmd.exe /c`
# @option opts [Hash] :environment A hash of environment variables that will be injected into the command
# @option opts [IO] :stdin An IO object that will be used to redirect STDIN for the docker command
def execute(*command, options)
command.unshift(options[:interpreter]) if options[:interpreter]
# Build the `--env` parameters
envs = []
if options[:environment]
options[:environment].each { |env, val| envs.concat(['--env', "#{env}=#{val}"]) }
end
command_options = []
# Need to be interactive if redirecting STDIN
command_options << '--interactive' unless options[:stdin].nil?
command_options.concat(envs) unless envs.empty?
command_options << container_id
command_options.concat(command)
@logger.debug { "Executing: exec #{command_options}" }
stdout_str, stderr_str, status = execute_local_docker_command('exec', command_options, options[:stdin])
# The actual result is the exitstatus not the process object
status = status.nil? ? -32768 : status.exitstatus
if status == 0
@logger.debug { "Command returned successfully" }
else
@logger.info { "Command failed with exit code #{status}" }
end
stdout_str.force_encoding(Encoding::UTF_8)
stderr_str.force_encoding(Encoding::UTF_8)
# Normalise line endings
stdout_str.gsub!("\r\n", "\n")
stderr_str.gsub!("\r\n", "\n")
[stdout_str, stderr_str, status]
rescue StandardError
@logger.debug { "Command aborted" }
raise
end
def write_remote_file(source, destination)
_, stdout_str, status = execute_local_docker_command('cp', [source, "#{container_id}:#{destination}"])
raise "Error writing file to container #{@container_id}: #{stdout_str}" unless status.exitstatus.zero?
rescue StandardError => e
raise Bolt::Node::FileError.new(e.message, 'WRITE_ERROR')
end
def write_remote_directory(source, destination)
_, stdout_str, status = execute_local_docker_command('cp', [source, "#{container_id}:#{destination}"])
raise "Error writing directory to container #{@container_id}: #{stdout_str}" unless status.exitstatus.zero?
rescue StandardError => e
raise Bolt::Node::FileError.new(e.message, 'WRITE_ERROR')
end
def mkdirs(dirs)
_, stderr, exitcode = execute('mkdir', '-p', *dirs, {})
if exitcode != 0
message = "Could not create directories: #{stderr}"
raise Bolt::Node::FileError.new(message, 'MKDIR_ERROR')
end
end
def make_tempdir
tmpdir = @target.options.fetch('tmpdir', container_tmpdir)
tmppath = "#{tmpdir}/#{SecureRandom.uuid}"
stdout, stderr, exitcode = execute('mkdir', '-m', '700', tmppath, {})
if exitcode != 0
raise Bolt::Node::FileError.new("Could not make tempdir: #{stderr}", 'TEMPDIR_ERROR')
end
tmppath || stdout.first
end
def with_remote_tempdir
dir = make_tempdir
yield dir
ensure
if dir
_, stderr, exitcode = execute('rm', '-rf', dir, {})
if exitcode != 0
@logger.warn("Failed to clean up tempdir '#{dir}': #{stderr}")
end
end
end
def write_remote_executable(dir, file, filename = nil)
filename ||= File.basename(file)
remote_path = File.join(dir.to_s, filename)
write_remote_file(file, remote_path)
make_executable(remote_path)
remote_path
end
def make_executable(path)
_, stderr, exitcode = execute('chmod', 'u+x', path, {})
if exitcode != 0
message = "Could not make file '#{path}' executable: #{stderr}"
raise Bolt::Node::FileError.new(message, 'CHMOD_ERROR')
end
end
private
# Converts the JSON encoded STDOUT string from the docker cli into ruby objects
#
# @param stdout_string [String] The string to convert
# @return [Object] Ruby object representation of the JSON string
def extract_json(stdout_string)
# The output from the docker format command is a JSON string per line.
# We can't do a direct convert but this helper method will convert it into
# an array of Objects
stdout_string.split("\n")
.reject { |str| str.strip.empty? }
.map { |str| JSON.parse(str) }
end
# rubocop:disable Metrics/LineLength
# Executes a Docker CLI command
#
# @param subcommand [String] The docker subcommand to run e.g. 'inspect' for `docker inspect`
# @param command_options [Array] Additional command options e.g. ['--size'] for `docker inspect --size`
# @param redir_stdin [IO] IO object which will be use to as STDIN in the docker command. Default is nil, which does not perform redirection
# @return [String, String, Process::Status] The output of the command: STDOUT, STDERR, Process Status
# rubocop:enable Metrics/LineLength
def execute_local_docker_command(subcommand, command_options = [], redir_stdin = nil)
env_hash = {}
# Set the DOCKER_HOST if we are using a non-default service-url
env_hash['DOCKER_HOST'] = @docker_host unless @docker_host.nil?
command_options = [] if command_options.nil?
docker_command = [subcommand].concat(command_options)
# Always use binary mode for any text data
capture_options = { binmode: true }
capture_options[:stdin_data] = redir_stdin unless redir_stdin.nil?
stdout_str, stderr_str, status = Open3.capture3(env_hash, 'docker', *docker_command, capture_options)
[stdout_str, stderr_str, status]
end
# Executes a Docker CLI command and parses the output in JSON format
#
# @param subcommand [String] The docker subcommand to run e.g. 'inspect' for `docker inspect`
# @param command_options [Array] Additional command options e.g. ['--size'] for `docker inspect --size`
# @return [Object] Ruby object representation of the JSON string
def execute_local_docker_json_command(subcommand, command_options = [])
command_options = [] if command_options.nil?
command_options = ['--format', '{{json .}}'].concat(command_options)
stdout_str, _stderr_str, _status = execute_local_docker_command(subcommand, command_options)
extract_json(stdout_str)
end
# The full ID of the target container
#
# @return [String] The full ID of the target container
def container_id
@container_info["Id"]
end
# The temp path inside the target container
#
# @return [String] The absolute path to the temp directory
def container_tmpdir
'/tmp'
end
end
end
end
end
| 1 | 12,363 | @glennsarti you want to validate this on Windows? IIRC this won't work | puppetlabs-bolt | rb |
@@ -45,8 +45,11 @@ public class ZipkinElasticsearchHttpStorageAutoConfiguration {
ElasticsearchHttpStorage.Builder esHttpBuilder(
ZipkinElasticsearchHttpStorageProperties elasticsearch,
@Qualifier("zipkinElasticsearchHttp") OkHttpClient client,
- @Value("${zipkin.storage.strict-trace-id:true}") boolean strictTraceId) {
- return elasticsearch.toBuilder(client).strictTraceId(strictTraceId);
+ @Value("${zipkin.storage.strict-trace-id:true}") boolean strictTraceId,
+ @Value("${zipkin.query.lookback:86400000}") int namesLookback) {
+ return elasticsearch.toBuilder(client)
+ .strictTraceId(strictTraceId)
+ .namesLookback(namesLookback);
}
/** cheap check to see if we are likely to include urls */ | 1 | /**
* Copyright 2015-2017 The OpenZipkin Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package zipkin.autoconfigure.storage.elasticsearch.http;
import okhttp3.OkHttpClient;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Condition;
import org.springframework.context.annotation.ConditionContext;
import org.springframework.context.annotation.Conditional;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.type.AnnotatedTypeMetadata;
import zipkin.storage.StorageComponent;
import zipkin.storage.elasticsearch.http.ElasticsearchHttpStorage;
@Configuration
@EnableConfigurationProperties(ZipkinElasticsearchHttpStorageProperties.class)
@ConditionalOnProperty(name = "zipkin.storage.type", havingValue = "elasticsearch")
@Conditional(ZipkinElasticsearchHttpStorageAutoConfiguration.HostsAreUrls.class)
@ConditionalOnMissingBean(StorageComponent.class)
public class ZipkinElasticsearchHttpStorageAutoConfiguration {
@Bean
@ConditionalOnMissingBean
StorageComponent storage(ElasticsearchHttpStorage.Builder esHttpBuilder) {
return esHttpBuilder.build();
}
@Bean
ElasticsearchHttpStorage.Builder esHttpBuilder(
ZipkinElasticsearchHttpStorageProperties elasticsearch,
@Qualifier("zipkinElasticsearchHttp") OkHttpClient client,
@Value("${zipkin.storage.strict-trace-id:true}") boolean strictTraceId) {
return elasticsearch.toBuilder(client).strictTraceId(strictTraceId);
}
/** cheap check to see if we are likely to include urls */
static final class HostsAreUrls implements Condition {
@Override public boolean matches(ConditionContext condition, AnnotatedTypeMetadata md) {
String hosts = condition.getEnvironment().getProperty("zipkin.storage.elasticsearch.hosts");
return hosts != null && (hosts.contains("http://") || hosts.contains("https://"));
}
}
}
| 1 | 12,215 | ps I'm ok to add this as a elasticsearch property instead. ex zipkin.storage.elasticsearch.names-lookback (we have similar things in cassandra) | openzipkin-zipkin | java |
@@ -69,12 +69,12 @@ class Search implements IWebSearch {
}
/**
- * Force a reindex.
+ * Force a re-index.
*/
reindex() {
let self = this;
- this.storage.getLocalDatabase(function(err, packages) {
- if (err) throw err; // that function shouldn't produce any
+ this.storage.getLocalDatabase(function(error, packages) {
+ if (error) throw error; // that function shouldn't produce any
let i = packages.length;
while (i--) {
self.add(packages[i]); | 1 | /**
* @prettier
* @flow
*/
import lunrMutable from 'lunr-mutable-indexes';
import type { Version } from '@verdaccio/types';
import type { IStorageHandler, IWebSearch } from '../../types';
/**
* Handle the search Indexer.
*/
class Search implements IWebSearch {
index: lunrMutable.index;
storage: IStorageHandler;
/**
* Constructor.
*/
constructor() {
/* eslint no-invalid-this: "off" */
this.index = lunrMutable(function() {
this.field('name', { boost: 10 });
this.field('description', { boost: 4 });
this.field('author', { boost: 6 });
this.field('keywords', { boost: 7 });
this.field('version');
this.field('readme');
});
}
/**
* Performs a query to the indexer.
* If the keyword is a * it returns all local elements
* otherwise performs a search
* @param {*} q the keyword
* @return {Array} list of results.
*/
query(query: string) {
return query === '*'
? this.storage.localStorage.localData.get(items => {
items.map(function(pkg) {
return { ref: pkg, score: 1 };
});
})
: this.index.search(`*${query}*`);
}
/**
* Add a new element to index
* @param {*} pkg the package
*/
add(pkg: Version) {
this.index.add({
id: pkg.name,
name: pkg.name,
description: pkg.description,
version: `v${pkg.version}`,
keywords: pkg.keywords,
author: pkg._npmUser ? pkg._npmUser.name : '???',
});
}
/**
* Remove an element from the index.
* @param {*} name the id element
*/
remove(name: string) {
this.index.remove({ id: name });
}
/**
* Force a reindex.
*/
reindex() {
let self = this;
this.storage.getLocalDatabase(function(err, packages) {
if (err) throw err; // that function shouldn't produce any
let i = packages.length;
while (i--) {
self.add(packages[i]);
}
});
}
/**
* Set up the {Storage}
* @param {*} storage An storage reference.
*/
configureStorage(storage: IStorageHandler) {
this.storage = storage;
this.reindex();
}
}
export default new Search();
| 1 | 19,420 | Curly brackets here `{ (breakLine) xx (breakLine)}` | verdaccio-verdaccio | js |
@@ -450,6 +450,11 @@ window.ConfigurationsView = countlyView.extend({
return false;
}
+ if($(this).val() === countlyGlobal["member"].username){
+ $(".username-check").remove();
+ return false;
+ }
+
$(this).next(".required").remove();
var existSpan = $("<span class='username-check red-text' data-localize='management-users.username.exists'>").html(jQuery.i18n.map["management-users.username.exists"]), | 1 | window.PluginsView = countlyView.extend({
initialize: function () {
this.filter = (store.get("countly_pluginsfilter")) ? store.get("countly_pluginsfilter") : "plugins-all";
},
beforeRender: function () {
if (this.template)
return $.when(countlyPlugins.initialize()).then(function () { });
else {
var self = this;
return $.when($.get(countlyGlobal["path"] + '/plugins/templates/plugins.html', function (src) {
self.template = Handlebars.compile(src);
}), countlyPlugins.initialize()).then(function () { });
}
},
renderCommon: function (isRefresh) {
var pluginsData = countlyPlugins.getData();
this.templateData = {
"page-title": jQuery.i18n.map["plugins.title"]
};
var self = this;
if (!isRefresh) {
$(this.el).html(this.template(this.templateData));
$("#" + this.filter).addClass("selected").addClass("active");
$.fn.dataTableExt.afnFiltering.push(function (oSettings, aData, iDataIndex) {
if (!$(oSettings.nTable).hasClass("plugins-filter")) {
return true;
}
if (self.filter == "plugins-enabled") {
return aData[1]
}
if (self.filter == "plugins-disabled") {
return !aData[1]
}
return true;
});
this.dtable = $('#plugins-table').dataTable($.extend({}, $.fn.dataTable.defaults, {
"aaData": pluginsData,
"bPaginate": false,
"aoColumns": [
{ "mData": function (row, type) { if (row.enabled) { return jQuery.i18n.map[row.code + ".plugin-title"] || jQuery.i18n.map[row.code + ".title"] || row.title; } else return row.title }, "sType": "string", "sTitle": jQuery.i18n.map["plugins.name"] },
{
"mData": function (row, type) {
if (type == "display") {
var disabled = (row.prepackaged) ? 'disabled' : '';
var input = '<div class="on-off-switch ' + disabled + '">';
if (row.enabled) {
input += '<input type="checkbox" class="on-off-switch-checkbox" id="plugin-' + row.code + '" checked ' + disabled + '>';
} else {
input += '<input type="checkbox" class="on-off-switch-checkbox" id="plugin-' + row.code + '" ' + disabled + '>';
}
input += '<label class="on-off-switch-label" for="plugin-' + row.code + '"></label>';
input += '<span class="text">' + jQuery.i18n.map["plugins.enable"] + '</span>';
return input;
} else {
return row.enabled;
}
},
"sType": "string", "sTitle": jQuery.i18n.map["plugins.state"], "sClass": "shrink"
},
{ "mData": function (row, type) { if (row.enabled) { return jQuery.i18n.map[row.code + ".plugin-description"] || jQuery.i18n.map[row.code + ".description"] || row.description; } else return row.description; }, "sType": "string", "sTitle": jQuery.i18n.map["plugins.description"], "bSortable": false, "sClass": "light" },
{ "mData": function (row, type) { return row.version; }, "sType": "string", "sTitle": jQuery.i18n.map["plugins.version"], "sClass": "center", "bSortable": false },
{ "mData": function (row, type) { if (row.homepage != "") return '<a class="plugin-link" href="' + row.homepage + '" target="_blank"><i class="ion-android-open"></i></a>'; else return ""; }, "sType": "string", "sTitle": jQuery.i18n.map["plugins.homepage"], "sClass": "shrink center", "bSortable": false }
]
}));
this.dtable.stickyTableHeaders();
this.dtable.fnSort([[0, 'asc']]);
/*
Make header sticky if scroll is more than the height of header
This is done in order to make Apply Changes button visible
*/
var navigationTop = $("#sticky-plugin-header").offset().top;
var tableHeaderTop = $("#plugins-table").find("thead").offset().top;
$(window).on("scroll", function (e) {
var topBarHeight = $("#top-bar").outerHeight();
var $fixedHeader = $("#sticky-plugin-header");
if ($(this).scrollTop() > navigationTop - topBarHeight) {
var width = $("#content-container").width();
$fixedHeader.addClass("fixed");
$fixedHeader.css({ width: width });
if (topBarHeight) {
$fixedHeader.css({ top: topBarHeight });
} else {
$fixedHeader.css({ top: 0 });
}
} else {
$fixedHeader.removeClass("fixed");
$fixedHeader.css({ width: "" });
}
if (($(this).scrollTop() + $fixedHeader.outerHeight()) > tableHeaderTop) {
$(".sticky-header").removeClass("hide");
} else {
$(".sticky-header").addClass("hide");
}
});
$(window).on("resize", function (e) {
var $fixedHeader = $("#sticky-plugin-header");
if ($fixedHeader.hasClass("fixed")) {
var width = $("#content-container").width();
$fixedHeader.css({ width: width });
}
});
}
},
refresh: function (Refreshme) {
if (Refreshme) {
var self = this;
return $.when(this.beforeRender()).then(function () {
if (app.activeView != self) {
return false;
}
CountlyHelpers.refreshTable(self.dtable, countlyPlugins.getData());
app.localize();
});
}
},
togglePlugin: function (plugins) {
var self = this;
var overlay = $("#overlay").clone();
$("body").append(overlay);
overlay.show();
var loader = $(this.el).find("#loader");
loader.show();
countlyPlugins.toggle(plugins, function (res) {
var msg = { clearAll: true };
if (res == "Success" || res == "Errors") {
var seconds = 10;
if (res == "Success") {
msg.title = jQuery.i18n.map["plugins.success"];
msg.message = jQuery.i18n.map["plugins.restart"] + " " + seconds + " " + jQuery.i18n.map["plugins.seconds"];
msg.info = jQuery.i18n.map["plugins.finish"];
msg.delay = seconds * 1000;
}
else if (res == "Errors") {
msg.title = jQuery.i18n.map["plugins.errors"];
msg.message = jQuery.i18n.map["plugins.errors-msg"];
msg.info = jQuery.i18n.map["plugins.restart"] + " " + seconds + " " + jQuery.i18n.map["plugins.seconds"];
msg.sticky = true;
msg.type = "error";
}
setTimeout(function () {
window.location.reload(true);
}, seconds * 1000);
}
else {
overlay.hide();
loader.hide();
msg.title = jQuery.i18n.map["plugins.error"];
msg.message = res;
msg.info = jQuery.i18n.map["plugins.retry"];
msg.sticky = true;
msg.type = "error";
}
CountlyHelpers.notify(msg);
});
},
filterPlugins: function (filter) {
this.filter = filter;
store.set("countly_pluginsfilter", filter);
$("#" + this.filter).addClass("selected").addClass("active");
this.dtable.fnDraw();
}
});
window.ConfigurationsView = countlyView.extend({
userConfig: false,
initialize: function () {
this.predefinedInputs = {};
this.predefinedLabels = {
};
this.configsData = {};
this.cache = {};
this.changes = {};
//register some common system config inputs
this.registerInput("apps.category", function (value) {
return null;
var categories = app.manageAppsView.getAppCategories();
var select = '<div class="cly-select" id="apps.category">' +
'<div class="select-inner">' +
'<div class="text-container">';
if (!categories[value])
select += '<div class="text"></div>';
else
select += '<div class="text">' + categories[value] + '</div>';
select += '</div>' +
'<div class="right combo"></div>' +
'</div>' +
'<div class="select-items square">' +
'<div>';
for (var i in categories) {
select += '<div data-value="' + i + '" class="segmentation-option item">' + categories[i] + '</div>';
}
select += '</div>' +
'</div>' +
'</div>';
return select;
});
this.registerInput("apps.country", function (value) {
var zones = app.manageAppsView.getTimeZones();
var select = '<div class="cly-select" id="apps.country">' +
'<div class="select-inner">' +
'<div class="text-container">';
if (!zones[value])
select += '<div class="text"></div>';
else
select += '<div class="text"><div class="flag" style="background-image:url(images/flags/' + value.toLowerCase() + '.png)"></div>' + zones[value].n + '</div>';
select += '</div>' +
'<div class="right combo"></div>' +
'</div>' +
'<div class="select-items square">' +
'<div>';
for (var i in zones) {
select += '<div data-value="' + i + '" class="segmentation-option item"><div class="flag" style="background-image:url(images/flags/' + i.toLowerCase() + '.png)"></div>' + zones[i].n + '</div>';
}
select += '</div>' +
'</div>' +
'</div>';
return select;
});
this.registerInput("frontend.theme", function (value) {
var themes = countlyPlugins.getThemeList();
var select = '<div class="cly-select" id="frontend.theme">' +
'<div class="select-inner">' +
'<div class="text-container">';
if (value && value.length)
select += '<div class="text">' + value + '</div>';
else
select += '<div class="text" data-localize="configs.no-theme">' + jQuery.i18n.map["configs.no-theme"] + '</div>';
select += '</div>' +
'<div class="right combo"></div>' +
'</div>' +
'<div class="select-items square">' +
'<div>';
for (var i = 0; i < themes.length; i++) {
if (themes[i] == "")
select += '<div data-value="" class="segmentation-option item" data-localize="configs.no-theme">' + jQuery.i18n.map["configs.no-theme"] + '</div>';
else
select += '<div data-value="' + themes[i] + '" class="segmentation-option item">' + themes[i] + '</div>';
}
select += '</div>' +
'</div>' +
'</div>';
return select;
});
//register some common system config inputs
this.registerInput("logs.default", function (value) {
var categories = ['debug', 'info', 'warn', 'error'];
var select = '<div class="cly-select" id="logs.default">' +
'<div class="select-inner">' +
'<div class="text-container">';
if (value && value.length)
select += '<div class="text" data-localize="configs.logs.' + value + '">' + jQuery.i18n.map["configs.logs." + value] + '</div>';
else
select += '<div class="text" data-localzie="configs.logs.warn">' + Query.i18n.map["configs.logs.warn"] + '</div>';
select += '</div>' +
'<div class="right combo"></div>' +
'</div>' +
'<div class="select-items square">' +
'<div>';
for (var i = 0; i < categories.length; i++) {
select += '<div data-value="' + categories[i] + '" class="segmentation-option item" data-localize="configs.logs.' + categories[i] + '">' + jQuery.i18n.map["configs.logs." + categories[i]] + '</div>';
}
select += '</div>' +
'</div>' +
'</div>';
return select;
});
this.registerInput("security.dashboard_additional_headers", function (value) {
return '<textarea rows="5" style="width:100%" id="security.dashboard_additional_headers">' + (value || "") + '</textarea>';
});
this.registerInput("security.api_additional_headers", function (value) {
return '<textarea rows="5" style="width:100%" id="security.api_additional_headers">' + (value || "") + '</textarea>';
});
this.registerInput("apps.timezone", function (value) {
return null;
});
},
beforeRender: function () {
if (this.template)
if (this.userConfig)
return $.when(countlyPlugins.initializeUserConfigs()).then(function () { });
else
return $.when(countlyPlugins.initializeConfigs()).then(function () { });
else {
var self = this;
if (this.userConfig)
return $.when($.get(countlyGlobal["path"] + '/plugins/templates/configurations.html', function (src) {
self.template = Handlebars.compile(src);
}), countlyPlugins.initializeUserConfigs()).then(function () { });
else
return $.when($.get(countlyGlobal["path"] + '/plugins/templates/configurations.html', function (src) {
self.template = Handlebars.compile(src);
}), countlyPlugins.initializeConfigs()).then(function () { });
}
},
renderCommon: function (isRefresh) {
    // Renders either the global configurations page or the per-user settings
    // page (mode chosen by this.userConfig). Builds the nav titles and the
    // configs table HTML, then — on a full (non-refresh) render only — wires
    // up every input, save, and search event handler.
    if (this.reset)
        $("#new-install-overlay").show();
    if (this.userConfig)
        this.configsData = countlyPlugins.getUserConfigsData();
    else
        this.configsData = countlyPlugins.getConfigsData();
    this.searchKeys = this.getSearchKeys(this.configsData);
    this.navTitles = this.setNavTitles(this.configsData);
    this.selectedNav = this.navTitles.coreTitles[0];
    var configsHTML;
    var self = this;
    var title = jQuery.i18n.map["plugins.configs"];
    if (this.userConfig)
        title = jQuery.i18n.map["plugins.user-configs"];
    // If a namespace was given in the URL and exists, select it; otherwise
    // fall back to the first core section and normalize the URL.
    if (this.namespace && this.configsData[this.namespace]) {
        this.selectedNav = this.navTitles.coreTitles.find(function (x) { return x.key === self.namespace }) || this.navTitles.pluginTitles.find(function (x) { return x.key === self.namespace });
        configsHTML = this.generateConfigsTable(this.configsData);
    }
    else {
        if (this.selectedNav && !this.userConfig)
            app.navigate("/manage/configurations/" + this.selectedNav.key);
        configsHTML = this.generateConfigsTable(this.configsData);
    }
    this.templateData = {
        "page-title": title,
        "configs": configsHTML,
        "namespace": this.namespace,
        "user": this.userConfig,
        "reset": this.reset,
        "navTitles": this.navTitles,
        "selectedNav": this.selectedNav
    };
    // After a successful save the page reloads with /success/ in the hash;
    // show the notification once and rewrite the hash so refreshes don't
    // re-trigger it.
    if (this.success) {
        CountlyHelpers.notify({
            title: jQuery.i18n.map["configs.changed"],
            message: jQuery.i18n.map["configs.saved"]
        });
        this.success = false;
        if (this.userConfig)
            app.noHistory("#/manage/user-settings");
        else
            app.noHistory("#/manage/configurations/" + this.selectedNav.key)
    }
    if (!isRefresh) {
        $(this.el).html(this.template(this.templateData));
        if (this.userConfig) {
            $('#configs-title-bar').hide();
            $('#config-title').html(jQuery.i18n.map['plugins.user-configs']);
            $('#config-table-container').addClass('user-settings-table');
        }
        else {
            $('#configs-table-widget').css('marginLeft', '200px');
            $('#nav-item-' + this.selectedNav.key).addClass('selected');
        }
        // `changes` accumulates pending edits; `cache` is a deep copy of the
        // loaded data used to detect whether anything actually differs.
        this.changes = {};
        this.cache = JSON.parse(JSON.stringify(this.configsData));
        $(".configs #username").val(countlyGlobal["member"].username);
        $(".configs #api-key").val(countlyGlobal["member"].api_key);
        $("#configs-back").click(function () {
            app.back('/manage/configurations');
        });
        // Route every kind of input change through updateConfig so the
        // Apply Changes button state stays consistent.
        $('.on-off-switch input').on("change", function () {
            var isChecked = $(this).is(":checked"),
                attrID = $(this).attr("id");
            self.updateConfig(attrID, isChecked);
        });
        $(".configs input").keyup(function () {
            var id = $(this).attr("id");
            var value = $(this).val();
            if ($(this).attr("type") == "number")
                value = parseFloat(value);
            self.updateConfig(id, value);
        });
        $(".configs textarea").keyup(function () {
            var id = $(this).attr("id");
            var value = $(this).val();
            self.updateConfig(id, value);
        });
        $(".configs .segmentation-option").on("click", function () {
            var id = $(this).closest(".cly-select").attr("id");
            var value = $(this).data("value");
            self.updateConfig(id, value);
        });
        $('.configs .user-config-select').on('cly-multi-select-change', function () {
            var id = $(this).closest('.cly-multi-select').attr('id');
            var _user = self.configsData[id.split('.')[0]][id.split('.')[1]];
            var value = $(this).data("value");
            var newUserValues = {};
            for (var i in _user) {
                newUserValues[i] = value.indexOf(i) >= 0;
            }
            self.updateConfig(id, newUserValues);
        });
        // Live availability check for the username field (throttled).
        $(".configs #username").off("keyup").on("keyup", _.throttle(function () {
            if (!($(this).val().length) || $("#menu-username").text() == $(this).val()) {
                $(".username-check").remove();
                return false;
            }
            $(this).next(".required").remove();
            var existSpan = $("<span class='username-check red-text' data-localize='management-users.username.exists'>").html(jQuery.i18n.map["management-users.username.exists"]),
                notExistSpan = $("<span class='username-check green-text'>").html("&#10004;"),
                data = {};
            data.username = $(this).val();
            data._csrf = countlyGlobal['csrf_token'];
            // NOTE: intentionally shadows the outer `self` (view) with the
            // jQuery-wrapped input for use inside the AJAX callback.
            var self = $(this);
            $.ajax({
                type: "POST",
                url: countlyGlobal["path"] + "/users/check/username",
                data: data,
                success: function (result) {
                    $(".username-check").remove();
                    if (result) {
                        self.after(notExistSpan.clone());
                    } else {
                        self.after(existSpan.clone());
                    }
                }
            });
        }, 300));
        $(".configs #new_pwd").off("keyup").on("keyup", _.throttle(function () {
            $(".password-check").remove();
            var error = CountlyHelpers.validatePassword($(this).val());
            if (error) {
                var invalidSpan = $("<div class='password-check red-text'>").html(error);
                $(this).after(invalidSpan.clone());
                return false;
            }
        }, 300));
        $(".configs #re_new_pwd").off("keyup").on("keyup", _.throttle(function () {
            $(".password-check").remove();
            var error = CountlyHelpers.validatePassword($(this).val());
            if (error) {
                var invalidSpan = $("<div class='password-check red-text'>").html(error);
                $(this).after(invalidSpan.clone());
                return false;
            }
        }, 300));
        $(".configs #usersettings_regenerate").click(function () {
            $(".configs #api-key").val(CountlyHelpers.generatePassword(32, true)).trigger("keyup");
        });
        // Left-hand navigation: switch visible config section without a
        // full re-render.
        $('.config-container').off('click').on('click', function () {
            var key = $(this).attr('id').replace('nav-item-', '');
            app.navigate("/manage/configurations/" + key);
            self.selectedNav = self.navTitles.coreTitles.find(function (x) { return x.key === key }) || self.navTitles.pluginTitles.find(function (x) { return x.key === key });
            self.templateData.selectedNav = self.selectedNav;
            $('.config-container').removeClass('selected');
            $('#nav-item-' + self.selectedNav.key).addClass('selected');
            $('.config-table-row').hide();
            $('.config-table-row-header').hide();
            $('#config-title').html(self.selectedNav.label);
            $('#config-table-row-' + self.selectedNav.key).show();
            $('#config-table-row-header-' + self.selectedNav.key).show();
            $('.config-table-details-row').show();
            $('#config-table-container').removeClass('title-rows-enable');
            $('#config-table-container').addClass('title-rows-hidden');
            $('#empty-search-result').hide();
            $('#search-box').val("");
        })
        // Track whether account-settings inputs differ from current values,
        // toggling the Apply Changes button accordingly.
        $(".configs .account-settings .input input").keyup(function () {
            $("#configs-apply-changes").removeClass("settings-changes");
            $(".configs .account-settings .input input").each(function () {
                var id = $(this).attr("id");
                switch (id) {
                    case "username":
                        if (this.value != $("#menu-username").text())
                            $("#configs-apply-changes").addClass("settings-changes");
                        break;
                    case "api-key":
                        if (this.value != $("#user-api-key").val())
                            $("#configs-apply-changes").addClass("settings-changes");
                        break;
                    default:
                        if (this.value != "")
                            $("#configs-apply-changes").addClass("settings-changes");
                        break;
                }
                if ($("#configs-apply-changes").hasClass("settings-changes"))
                    $("#configs-apply-changes").show();
                else if (!$("#configs-apply-changes").hasClass("configs-changes"))
                    $("#configs-apply-changes").hide();
            });
        });
        // Save handler. In user-settings mode: validate account fields
        // client-side, POST them, then persist any pending user-level config
        // changes. In global mode: persist config changes and reload.
        $("#configs-apply-changes").click(function () {
            if (self.userConfig) {
                $(".username-check.green-text").remove();
                if ($(".red-text").length) {
                    CountlyHelpers.notify({
                        title: jQuery.i18n.map["user-settings.please-correct-input"],
                        message: jQuery.i18n.map["configs.not-saved"],
                        type: "error"
                    });
                    return false;
                }
                var username = $(".configs #username").val(),
                    old_pwd = $(".configs #old_pwd").val(),
                    new_pwd = $(".configs #new_pwd").val(),
                    re_new_pwd = $(".configs #re_new_pwd").val(),
                    api_key = $(".configs #api-key").val();
                var ignoreError = false;
                if ((new_pwd.length && re_new_pwd.length) || api_key.length || username.length) {
                    ignoreError = true;
                }
                if ((new_pwd.length || re_new_pwd.length) && !old_pwd.length) {
                    CountlyHelpers.notify({
                        title: jQuery.i18n.map["configs.not-saved"],
                        message: jQuery.i18n.map["user-settings.old-password-match"],
                        type: "error"
                    });
                    return true;
                }
                if (new_pwd != re_new_pwd) {
                    CountlyHelpers.notify({
                        title: jQuery.i18n.map["configs.not-saved"],
                        message: jQuery.i18n.map["user-settings.password-match"],
                        type: "error"
                    });
                    return true;
                }
                if (new_pwd.length && new_pwd == old_pwd) {
                    CountlyHelpers.notify({
                        title: jQuery.i18n.map["configs.not-saved"],
                        message: jQuery.i18n.map["user-settings.password-not-old"],
                        type: "error"
                    });
                    return true;
                }
                if (api_key.length != 32) {
                    CountlyHelpers.notify({
                        title: jQuery.i18n.map["configs.not-saved"],
                        message: jQuery.i18n.map["user-settings.api-key-length"],
                        type: "error"
                    });
                    return true;
                }
                // NOTE(review): server is assumed to re-validate username
                // uniqueness and ownership; client checks above are advisory.
                $.ajax({
                    type: "POST",
                    url: countlyGlobal["path"] + "/user/settings",
                    data: {
                        "username": username,
                        "old_pwd": old_pwd,
                        "new_pwd": new_pwd,
                        "api_key": api_key,
                        _csrf: countlyGlobal['csrf_token']
                    },
                    success: function (result) {
                        var saveResult = $(".configs #settings-save-result");
                        if (result == "username-exists") {
                            CountlyHelpers.notify({
                                title: jQuery.i18n.map["configs.not-saved"],
                                message: jQuery.i18n.map["management-users.username.exists"],
                                type: "error"
                            });
                            return true;
                        } else if (!result) {
                            CountlyHelpers.notify({
                                title: jQuery.i18n.map["configs.not-saved"],
                                message: jQuery.i18n.map["user-settings.alert"],
                                type: "error"
                            });
                            return true;
                        } else {
                            // Numeric result = password change timestamp;
                            // string result = a localizable error key.
                            if (!isNaN(parseInt(result))) {
                                $("#new-install-overlay").fadeOut();
                                countlyGlobal["member"].password_changed = parseInt(result);
                            }
                            else if (typeof result === "string") {
                                CountlyHelpers.notify({
                                    title: jQuery.i18n.map["configs.not-saved"],
                                    message: jQuery.i18n.map[result],
                                    type: "error"
                                });
                                return true;
                            }
                            $("#user-api-key").val(api_key);
                            $(".configs #old_pwd").val("");
                            $(".configs #new_pwd").val("");
                            $(".configs #re_new_pwd").val("");
                            $("#menu-username").text(username);
                            $("#user-api-key").val(api_key);
                            countlyGlobal["member"].username = username;
                            countlyGlobal["member"].api_key = api_key;
                        }
                        if (Object.keys(self.changes).length) {
                            countlyPlugins.updateUserConfigs(self.changes, function (err, services) {
                                if (err && !ignoreError) {
                                    CountlyHelpers.notify({
                                        title: jQuery.i18n.map["configs.not-saved"],
                                        message: jQuery.i18n.map["configs.not-changed"],
                                        type: "error"
                                    });
                                }
                                else {
                                    CountlyHelpers.notify({
                                        title: jQuery.i18n.map["configs.changed"],
                                        message: jQuery.i18n.map["configs.saved"]
                                    });
                                    self.configsData = JSON.parse(JSON.stringify(self.cache));
                                    $("#configs-apply-changes").hide();
                                    self.changes = {};
                                }
                            });
                        }
                        else {
                            CountlyHelpers.notify({
                                title: jQuery.i18n.map["configs.changed"],
                                message: jQuery.i18n.map["configs.saved"]
                            });
                            $("#configs-apply-changes").hide();
                        }
                    }
                });
            }
            else {
                countlyPlugins.updateConfigs(self.changes, function (err, services) {
                    if (err) {
                        CountlyHelpers.notify({
                            title: jQuery.i18n.map["configs.not-saved"],
                            message: jQuery.i18n.map["configs.not-changed"],
                            type: "error"
                        });
                    }
                    else {
                        // Full reload so server-side config changes take
                        // effect; /success/ in the hash triggers the notify.
                        location.hash = "#/manage/configurations/success/" + self.selectedNav.key;
                        window.location.reload(true);
                    }
                });
            }
        });
        // Client-side search over the precomputed searchKeys index.
        $('#search-box').off('input').on('input', function () {
            var searchKey = $(this).val().toLowerCase();
            if (searchKey.length === 0) {
                $('#nav-item-' + self.selectedNav.key).trigger('click');
                return;
            }
            var searchResult = self.searchKeys.filter(function (item) {
                return item.searchKeys.indexOf(searchKey.toLowerCase()) >= 0
            });
            var configGroups = [];
            searchResult.forEach(function (result) {
                var group = {
                    key: result.key,
                    rows: result.subItems.filter(function (field) { return field.searchKeys.indexOf(searchKey.toLowerCase()) >= 0 })
                }
                configGroups.push(group);
            });
            $('.config-container').removeClass('selected');
            $('#config-title').html('RESULTS FOR: "' + searchKey + '"');
            $('#config-table-container').removeClass('title-rows-hidden');
            $('#config-table-container').addClass('title-rows-enable');
            self.showSearchResult(configGroups);
            if (configGroups.length === 0) {
                $('#empty-search-result').show();
            } else {
                $('#empty-search-result').hide();
            }
        });
        if (countlyGlobal["member"].global_admin) {
            $(".user-row").show();
        }
        /*
        Make header sticky if scroll is more than the height of header
        This is done in order to make Apply Changes button visible
        */
        var navigationTop = $("#sticky-config-header").offset().top;
        $(window).on("resize", function (e) {
            var $fixedHeader = $("#sticky-config-header");
            if ($fixedHeader.hasClass("fixed")) {
                var width = $("#content-container").width();
                $fixedHeader.css({ width: width });
            }
        });
    }
},
updateConfig: function (id, value) {
var configs = id.split(".");
//update cache
var data = this.cache;
for (var i = 0; i < configs.length; i++) {
if (typeof data[configs[i]] == "undefined") {
break;
}
else if (i == configs.length - 1) {
data[configs[i]] = value;
}
else {
data = data[configs[i]];
}
}
//add to changes
var data = this.changes;
for (var i = 0; i < configs.length; i++) {
if (i == configs.length - 1) {
data[configs[i]] = value;
}
else if (typeof data[configs[i]] == "undefined") {
data[configs[i]] = {};
}
data = data[configs[i]];
}
$("#configs-apply-changes").removeClass("configs-changes");
if (JSON.stringify(this.configsData) != JSON.stringify(this.cache)) {
$("#configs-apply-changes").addClass("configs-changes");
}
else {
this.changes = {};
}
if ($("#configs-apply-changes").hasClass("configs-changes"))
$("#configs-apply-changes").show();
else if (!$("#configs-apply-changes").hasClass("settings-changes"))
$("#configs-apply-changes").hide();
},
getSearchKeys: function (data) {
    // Build a lowercase search index over the config sections: for each
    // top-level section, a concatenated string of every field's label, help
    // text and key, plus a per-field list for narrowing results to rows.
    var result = Object.keys(data).reduce(function (prev, key) {
        var dataItem = data[key];
        var searchItem = {}
        var searcKeyItems = Object.keys(dataItem).reduce(function (subPrev, subKey) {
            var isCore = countlyGlobal.plugins.indexOf(key) === -1;
            // Try the various localization key conventions, falling back to
            // the raw field key.
            var titleText = jQuery.i18n.map["configs." + key + "-" + subKey] || jQuery.i18n.map[key + "." + subKey] || jQuery.i18n.map[key + "." + subKey.replace(/\_/g, '-')] || jQuery.i18n.map["userdata." + subKey] || subKey;
            var helpText = jQuery.i18n.map["configs.help." + key + "-" + subKey] || "";
            var searchItems = titleText + "," + helpText + "," + subKey + ",";
            subPrev.subItems.push({
                key: subKey,
                searchKeys: searchItems.toLowerCase()
            });
            subPrev.wholeList += searchItems.toLowerCase();
            return subPrev;
        }, { wholeList: "", subItems: [] });
        searchItem.searchKeys = searcKeyItems.wholeList;
        searchItem.subItems = searcKeyItems.subItems;
        delete searchItem.subItems.wholeList;
        searchItem.key = key;
        prev.push(searchItem);
        return prev;
    }, []);
    return result;
},
generateConfigsTable: function (configsData, id) {
    // Recursively render the configuration data as nested HTML tables.
    // `id` is the dotted path accumulated so far ("" at the top level); each
    // nested object becomes a section row, each scalar a labeled input row,
    // and the special "_user" object becomes a multi-select of user-level
    // overridable settings.
    id = id || "";
    var first = true;
    if (id != "") {
        first = false;
    }
    var configsHTML = "";
    if (!first)
        configsHTML += "<table class='d-table help-zone-vb' cellpadding='0' cellspacing='0'>";
    var objectKeys = Object.keys(configsData);
    // For the logs section always show "default" first.
    if (id === ".logs") {
        objectKeys.splice(objectKeys.indexOf("default"), 1);
        objectKeys.unshift('default');
    }
    for (var a in objectKeys) {
        var i = objectKeys[a];
        if (typeof configsData[i] == "object" && i !== "_user") {
            if (configsData[i] != null) {
                var label = this.getInputLabel((id + "." + i).substring(1), i);
                if (label) {
                    // Only the selected nav section is visible initially
                    // (user-settings mode shows all sections as table rows).
                    var display = i === this.selectedNav.key ? this.userConfig ? "table-row" : "block" : this.userConfig ? "table-row" : "none";
                    var category = "CORE";
                    var relatedNav = this.navTitles.coreTitles.find(function (x) { return x.key === i });
                    if (!relatedNav) {
                        category = "PLUGINS";
                        relatedNav = this.navTitles.pluginTitles.find(function (x) { return x.key === i });
                    }
                    configsHTML += "<tr id='config-table-row-" + i + "' style='display:" + display + "' class='config-table-row'>";
                    if (this.userConfig) {
                        configsHTML += "<td>" + relatedNav.label + "</td>";
                    }
                    configsHTML += "<td>" + this.generateConfigsTable(configsData[i], id + "." + i) + "</td>";
                    configsHTML += "</tr>";
                }
            }
            else {
                // Null config value: render an empty input if the field is known.
                var input = this.getInputByType((id + "." + i).substring(1), "");
                var label = this.getInputLabel((id + "." + i).substring(1), i);
                if (input && label)
                    configsHTML += "<tr id='config-row-" + i + "-" + id.replace(".", "") + "' class='config-table-details-row'><td>" + label + "</td><td>" + input + "</td></tr>";
            }
        } else if (i === "_user") {
            // "_user" maps setting-name -> bool (whether users may override it);
            // rendered as a multi-select, visible to global admins only.
            var hasSelectedData = Object.keys(configsData[i]).some(function (key) {
                return configsData[i][key]
            });
            var label = '<div data-localize="' + jQuery.i18n.map["configs.user-level-configuration"] + '">' + jQuery.i18n.map["configs.user-level-configuration"] + '</div><span class="config-help" data-localize="' + jQuery.i18n.map["configs.help.user-level-configuration"] + '">' + jQuery.i18n.map["configs.help.user-level-configuration"] + '</span>';
            var input = '<div class="cly-multi-select user-config-select ' + (hasSelectedData ? 'selection-exists' : '') + '" id="' + id.substring(1) + '._user" style="width: 100%; box-sizing: border-box;">'
            input += '<div class="select-inner">';
            input += '<div class="text-container">';
            input += '<div class="text">';
            input += '<div class="default-text"></div>';
            for (var c in configsData[i]) {
                if (configsData[i][c])
                    input += '<div class="selection" data-value="' + c + '">' + this.getLabelName((id + "." + c).substring(1), c).text + '<div class="remove"><i class="ion-android-close"></i></div></div>'
            }
            input += '</div>';
            input += '</div>';
            input += '<div class="right combo"></div>';
            input += '</div>';
            input += '<div class="select-items square" style="width: 100%;"><div>';
            for (var c in configsData[i]) {
                input += '<div data-value="' + c + '" class="item ' + (configsData[i][c] ? 'selected' : '') + '">' + this.getLabelName((id + "." + c).substring(1), c).text + '</div>';
            }
            input += '</div></div>';
            input += '</div>';
            configsHTML += "<tr id='config-row-" + i + "-user-conf' class='config-table-details-row user-row' style='display:none'><td>" + label + "</td><td>" + input + "</td></tr>";
        } else {
            // Scalar value: render the appropriate input for its type.
            var input = this.getInputByType((id + "." + i).substring(1), configsData[i]);
            var label = this.getInputLabel((id + "." + i).substring(1), i);
            if (input && label)
                configsHTML += "<tr id='config-row-" + i + "-" + id.replace(".", "") + "' class='config-table-details-row'><td>" + label + "</td><td>" + input + "</td></tr>";
        }
    }
    if (!first)
        configsHTML += "</table>";
    return configsHTML;
},
getLabelName: function (id, value) {
var ns = id.split(".")[0];
if (ns != "frontend" && ns != "api" && ns != "apps" && ns != "logs" && ns != "security" && countlyGlobal["plugins"].indexOf(ns) == -1) {
return null;
}
if (typeof this.predefinedLabels[id] != "undefined")
return { dataLocalize: this.predefinedLabels[id], text: jQuery.i18n.map[this.predefinedLabels[id]] };
else if (jQuery.i18n.map["configs." + id])
return { dataLocalize: 'configs." + id + "', text: jQuery.i18n.map["configs." + id] };
else if (jQuery.i18n.map["configs." + id.replace(".", "-")])
return { dataLocalize: 'configs." + id.replace(".", "-") + "', text: jQuery.i18n.map["configs." + id.replace(".", "-")] };
else if (jQuery.i18n.map[id])
return { dataLocalize: id, text: jQuery.i18n.map[id] };
else if (jQuery.i18n.map[id.replace(".", "-")])
return { dataLocalize: '" + id.replace(".", "-") + "', text: jQuery.i18n.map[id.replace(".", "-")] };
else
return { text: value };
},
getInputLabel: function (id, value,asLabel) {
var ns = id.split(".")[0];
if (ns != "frontend" && ns != "api" && ns != "apps" && ns != "logs" && ns != "security" && countlyGlobal["plugins"].indexOf(ns) == -1) {
return null;
}
var ret = "";
if (jQuery.i18n.map["configs.help." + id])
ret = "<span class='config-help' data-localize='configs.help." + id + "'>" + jQuery.i18n.map["configs.help." + id] + "</span>";
else if (jQuery.i18n.map["configs.help." + id.replace(".", "-")])
ret = "<span class='config-help' data-localize='configs.help." + id.replace(".", "-") + "'>" + jQuery.i18n.map["configs.help." + id.replace(".", "-")] + "</span>";
var labelNameItem = this.getLabelName(id, value);
if(asLabel)
return "<label data-localize='" + labelNameItem.dataLocalize + "'>" + labelNameItem.text + "</label>" + ret;
else
return "<div data-localize='" + labelNameItem.dataLocalize + "'>" + labelNameItem.text + "</div>" + ret;
},
getInputByType: function (id, value) {
if (this.predefinedInputs[id]) {
return this.predefinedInputs[id](value);
}
else if (typeof value == "boolean") {
var input = '<div class="on-off-switch">';
if (value) {
input += '<input type="checkbox" name="on-off-switch" class="on-off-switch-checkbox" id="' + id + '" checked>';
} else {
input += '<input type="checkbox" name="on-off-switch" class="on-off-switch-checkbox" id="' + id + '">';
}
input += '<label class="on-off-switch-label" for="' + id + '"></label>';
input += '<span class="text">' + jQuery.i18n.map["plugins.enable"] + '</span>';
input +="</div>";
return input;
}
else if (typeof value == "number") {
return "<input type='number' id='" + id + "' value='" + value + "' max='2147483647' onkeyup='this.value= (parseInt(this.value) > 2147483647) ? 2147483647 : this.value;'/>";
}
else
return "<input type='text' id='" + id + "' value='" + value + "'/>";
},
getLabel: function (id) {
if (countlyGlobal["plugins"].indexOf(id) == -1)
return jQuery.i18n.map["configs." + id];
var ret = "";
if (jQuery.i18n.map["configs.help." + id])
ret = jQuery.i18n.map["configs.help." + id];
else if (jQuery.i18n.map["configs.help." + id.replace(".", "-")])
ret = jQuery.i18n.map["configs.help." + id.replace(".", "-")];
if (typeof this.predefinedLabels[id] != "undefined")
return jQuery.i18n.map[this.predefinedLabels[id]] + ret;
else if (jQuery.i18n.map["configs." + id])
return jQuery.i18n.map["configs." + id] + ret;
else if (jQuery.i18n.map["configs." + id.replace(".", "-")])
return jQuery.i18n.map["configs." + id.replace(".", "-")] + ret;
else if (jQuery.i18n.map[id])
return jQuery.i18n.map[id] + ret;
else if (jQuery.i18n.map[id.replace(".", "-")])
return jQuery.i18n.map[id.replace(".", "-")] + ret;
else
return id + ret;
},
setNavTitles: function (configdata) {
var pluginTitles = [], coreTitles = [];
var self = this;
var coreDefaults = ['frontend', 'security', 'api', 'apps', 'logs'];
Object.keys(configdata).forEach(function (key) {
if (coreDefaults.indexOf(key) >= 0)
coreTitles.push({ key: key, label: self.getLabel(key) });
else if (countlyGlobal["plugins"].indexOf(key) >= 0)
pluginTitles.push({ key: key, label: self.getLabel(key) });
});
coreTitles = coreTitles.sort(function (a, b) { return a > b; });
pluginTitles = pluginTitles.sort(function (a, b) { return a > b; });
return {
coreTitles: coreTitles,
pluginTitles: pluginTitles
}
},
showSearchResult: function (results) {
$('.config-table-row-header').hide();
$('.config-table-row').hide();
$('.config-table-details-row').hide();
results.forEach(function (result) {
$('#config-table-row-header-' + result.key).show();
$('#config-table-row-' + result.key).show();
result.rows.forEach(function (row) {
$('#config-row-' + row.key + '-' + result.key).show();
})
})
},
// Allow plugins to supply a custom input renderer for a given config id.
registerInput: function (id, callback) {
    this.predefinedInputs[id] = callback;
},
// Allow plugins to supply a custom localization key for a config id's label.
registerLabel: function (id, html) {
    this.predefinedLabels[id] = html;
},
// No periodic refresh needed for this view.
refresh: function () {
}
});
// Register the view singletons used by the routes below.
app.pluginsView = new PluginsView();
app.configurationsView = new ConfigurationsView();
// Plugin/configuration management is global-admin only: per-app overrides of
// selected API settings, plus the /manage/plugins and /manage/configurations
// routes.
if (countlyGlobal["member"].global_admin) {
    // Whitelist of settings that may be overridden per app.
    var showInAppManagment={"api":{"safe": true,"session_duration_limit": true,"city_data": true,"event_limit": true,"event_segmentation_limit": true,"event_segmentation_value_limit": true,"metric_limit": true,"session_cooldown": true,"total_users": true,"prevent_duplicate_requests": true,"metric_changes": true}}
    // Shared across the management views so configs are initialized once.
    var configManagementPromise = null;
    for (var key in showInAppManagment) {
        app.addAppManagementView(key, jQuery.i18n.map['configs.'+key], countlyManagementView.extend({
            key:key,
            initialize: function () {
                this.plugin = this.key;
            },
            // Build the form fields for the whitelisted settings, preferring
            // the app-level value over the global one when present.
            generateTemplate: function(id) {
                var fields='';
                this.configsData = countlyPlugins.getConfigsData();
                id = this.key || id;
                this.cache={}
                this.templateData={};
                var appConfigData = this.config();
                for (var i in showInAppManagment[id]){
                    if(showInAppManagment[id][i]==true) {
                        this.templateData[i] = this.configsData[id][i];
                        var myvalue = this.configsData[id][i];
                        if(appConfigData && appConfigData[i])
                            myvalue = appConfigData[i];
                        var input = app.configurationsView.getInputByType((id + "." + i), myvalue);
                        var label = app.configurationsView.getInputLabel((id + "." + i), i,true);
                        if (input && label) {
                            fields+=('<div id="config-row-' + i + "-" + id.replace(".", "") + '" class="mgmt-plugins-row help-zone-vs" data-help-localize="help.mgmt-plugins.push.ios.type">'+
                            '    <div>'+label+'</div>'+
                            '    <div>'+input+'</div>'+
                            '</div>');
                        }
                    }
                }
                return fields;
            },
            // Track edits (name is prefixed with the plugin key) and toggle
            // the save button when something actually changed.
            doOnChange: function(name, value) {
                if(name) {
                    name = name.substring(this.key.length+1);
                    if (name && countlyCommon.dot(this.templateData, name) !== value) {
                        countlyCommon.dot(this.templateData, name, value);
                    }
                    if (this.isSaveAvailable()) {
                        this.el.find('.icon-button').show();
                    } else {
                        this.el.find('.icon-button').hide();
                    }
                    this.onChange(name, value);
                }
            },
            beforeRender: function() {
                var self=this;
                if(!configManagementPromise){
                    configManagementPromise = $.when(countlyPlugins.initializeConfigs());
                }
                return $.when(configManagementPromise).then(function () {
                    configManagementPromise = null;
                    self.template = Handlebars.compile(self.generateTemplate(self.key));
                    self.savedTemplateData = JSON.stringify(self.templateData);
                }).then(function () {});
            }
        }));
    }
    app.route('/manage/plugins', 'plugins', function () {
        this.renderWhenReady(this.pluginsView);
    });
    app.route('/manage/configurations', 'configurations', function () {
        this.configurationsView.namespace = null;
        this.configurationsView.reset = false;
        this.configurationsView.userConfig = false;
        this.configurationsView.success = false;
        this.renderWhenReady(this.configurationsView);
    });
    // "reset" is a pseudo-namespace that shows the fresh-install overlay.
    app.route('/manage/configurations/:namespace', 'configurations_namespace', function (namespace) {
        if (namespace == "reset") {
            this.configurationsView.namespace = null;
            this.configurationsView.reset = true;
            this.configurationsView.userConfig = false;
            this.configurationsView.success = false;
            this.renderWhenReady(this.configurationsView);
        } else {
            this.configurationsView.namespace = namespace;
            this.configurationsView.reset = false;
            this.configurationsView.userConfig = false;
            this.configurationsView.success = false;
            this.renderWhenReady(this.configurationsView);
        }
    });
    // /success/:namespace is set after a save + reload to show the saved
    // notification once.
    app.route('/manage/configurations/:status/:namespace', 'configurations_namespace', function (status, namespace) {
        if (status == "success") {
            this.configurationsView.namespace = namespace;
            this.configurationsView.reset = false;
            this.configurationsView.userConfig = false;
            this.configurationsView.success = true;
            this.renderWhenReady(this.configurationsView);
        }
    });
}
// Per-user settings routes (available to every member, not just admins):
// reuse the configurations view with userConfig mode switched on.
app.route('/manage/user-settings', 'user-settings', function () {
    this.configurationsView.namespace = null;
    this.configurationsView.reset = false;
    this.configurationsView.userConfig = true;
    this.configurationsView.success = false;
    this.renderWhenReady(this.configurationsView);
});
app.route('/manage/user-settings/:namespace', 'user-settings_namespace', function (namespace) {
    // "reset" shows the fresh-install overlay instead of a section.
    if (namespace == "reset") {
        this.configurationsView.reset = true;
        this.configurationsView.success = false;
        this.configurationsView.namespace = null;
    }
    else {
        this.configurationsView.reset = false;
        this.configurationsView.success = false;
        this.configurationsView.namespace = namespace;
    }
    this.configurationsView.userConfig = true;
    this.renderWhenReady(this.configurationsView);
});
// Wiring for the plugins page: filter buttons, enable/disable switches and
// the confirm-and-apply flow.
app.addPageScript("/manage/plugins", function () {
    $("#plugins-selector").find(">.button").click(function () {
        if ($(this).hasClass("selected")) {
            return true;
        }
        $(".plugins-selector").removeClass("selected").removeClass("active");
        var filter = $(this).attr("id");
        app.activeView.filterPlugins(filter);
    });
    // Working copy of the enabled-plugin list; the Apply button is shown
    // only while it differs from the server-side state.
    var plugins = _.clone(countlyGlobal["plugins"]);
    $("#plugins-table").on("change", ".on-off-switch input", function () {
        var $checkBox = $(this),
            plugin = $checkBox.attr("id").replace(/^plugin-/, '');
        if ($checkBox.is(":checked")) {
            plugins.push(plugin);
            plugins = _.uniq(plugins);
        } else {
            plugins = _.without(plugins, plugin);
        }
        if (_.difference(countlyGlobal["plugins"], plugins).length == 0 &&
            _.difference(plugins, countlyGlobal["plugins"]).length == 0) {
            $(".btn-plugin-enabler").hide();
        } else {
            $(".btn-plugin-enabler").show();
        }
    });
    $(document).on("click", ".btn-plugin-enabler", function () {
        // Collect the desired on/off state of every plugin from the table.
        var plugins = {};
        $("#plugins-table").find(".on-off-switch input").each(function () {
            var plugin = this.id.toString().replace(/^plugin-/, ''),
                state = ($(this).is(":checked")) ? true : false;
            plugins[plugin] = state;
        });
        var text = jQuery.i18n.map["plugins.confirm"];
        var msg = { title: jQuery.i18n.map["plugins.processing"], message: jQuery.i18n.map["plugins.wait"], info: jQuery.i18n.map["plugins.hold-on"], sticky: true };
        CountlyHelpers.confirm(text, "popStyleGreen popStyleGreenWide", function (result) {
            if (!result) {
                return true;
            }
            CountlyHelpers.notify(msg);
            app.activeView.togglePlugin(plugins);
        }, [jQuery.i18n.map["common.no-dont-continue"], jQuery.i18n.map["plugins.yes-i-want-to-apply-changes"]], { title: jQuery.i18n.map["plugins-apply-changes-to-plugins"], image: "apply-changes-to-plugins" });
    });
});
// Inject the Plugins and Configurations entries into the management sidebar
// for global admins (inserted just before the help toggle).
$(document).ready(function () {
    if (countlyGlobal["member"] && countlyGlobal["member"]["global_admin"]) {
        var menu = '<a href="#/manage/plugins" class="item">' +
            '<div class="logo-icon fa fa-puzzle-piece"></div>' +
            '<div class="text" data-localize="plugins.title"></div>' +
            '</a>';
        if ($('#management-submenu .help-toggle').length)
            $('#management-submenu .help-toggle').before(menu);
        var menu = '<a href="#/manage/configurations" class="item">' +
            '<div class="logo-icon fa fa-wrench"></div>' +
            '<div class="text" data-localize="plugins.configs"></div>' +
            '</a>';
        if ($('#management-submenu .help-toggle').length)
            $('#management-submenu .help-toggle').before(menu);
    }
});
@@ -46,13 +46,14 @@ public class DataSourceFactory {
static final long MAX_TTL_CONN_MS = TimeUnit.MILLISECONDS.convert(10L, TimeUnit.MINUTES);
static final String MYSQL_VALIDATION_QUERY = "/* ping */ SELECT 1";
+ static final String DRIVER_CLASS_NAME = "athenz.db.driver.class";
public static PoolableDataSource create(String url, Properties mysqlConnectionProperties) {
String driver = null;
try {
if (url.indexOf(":mysql:") > 0) {
- driver = "com.mysql.jdbc.Driver";
+ driver = System.getProperty(DRIVER_CLASS_NAME, "com.mysql.jdbc.Driver");
Class.forName(driver);
ConnectionFactory connectionFactory = | 1 | /*
* Copyright 2016 Yahoo Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.yahoo.athenz.common.server.db;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import org.apache.commons.dbcp2.ConnectionFactory;
import org.apache.commons.dbcp2.DriverManagerConnectionFactory;
import org.apache.commons.dbcp2.PoolableConnection;
import org.apache.commons.dbcp2.PoolableConnectionFactory;
import org.apache.commons.pool2.ObjectPool;
import org.apache.commons.pool2.impl.GenericObjectPool;
import org.apache.commons.pool2.impl.GenericObjectPoolConfig;
import org.apache.commons.pool2.impl.BaseObjectPoolConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class DataSourceFactory {
private static final Logger LOG = LoggerFactory.getLogger(DataSourceFactory.class);
static final String ATHENZ_PROP_DBPOOL_MAX_TOTAL = "athenz.db.pool_max_total";
static final String ATHENZ_PROP_DBPOOL_MAX_IDLE = "athenz.db.pool_max_idle";
static final String ATHENZ_PROP_DBPOOL_MIN_IDLE = "athenz.db.pool_min_idle";
static final String ATHENZ_PROP_DBPOOL_MAX_WAIT = "athenz.db.pool_max_wait";
static final String ATHENZ_PROP_DBPOOL_EVICT_IDLE_TIMEOUT = "athenz.db.pool_evict_idle_timeout";
static final String ATHENZ_PROP_DBPOOL_EVICT_IDLE_INTERVAL = "athenz.db.pool_evict_idle_interval";
static final String ATHENZ_PROP_DBPOOL_MAX_TTL = "athenz.db.pool_max_ttl";
static final String ATHENZ_PROP_DBPOOL_VALIDATION_QUERY = "athenz.db.pool_validation_query";
static final long MAX_TTL_CONN_MS = TimeUnit.MILLISECONDS.convert(10L, TimeUnit.MINUTES);
static final String MYSQL_VALIDATION_QUERY = "/* ping */ SELECT 1";
public static PoolableDataSource create(String url, Properties mysqlConnectionProperties) {
String driver = null;
try {
if (url.indexOf(":mysql:") > 0) {
driver = "com.mysql.jdbc.Driver";
Class.forName(driver);
ConnectionFactory connectionFactory =
new DriverManagerConnectionFactory(url, mysqlConnectionProperties);
return create(connectionFactory);
} else {
throw new RuntimeException("Cannot figure out how to instantiate this data source: " + url);
}
} catch (ClassNotFoundException e) {
throw new RuntimeException("Cannot load driver class: " + driver);
} catch (Exception exc) {
throw new RuntimeException("Failed to create database source(" +
url + ") with driver(" + driver + ")", exc);
}
}
static long retrieveConfigSetting(String propName, long defaultValue) {
String propValue = System.getProperty(propName);
if (propValue == null) {
return defaultValue;
}
long value = defaultValue;
try {
value = Long.parseLong(propValue);
} catch (NumberFormatException ex) {
if (LOG.isWarnEnabled()) {
LOG.warn("Ignoring Invalid number({}) set in system property({}). Using default ({})",
propValue, propName, defaultValue);
}
}
return value;
}
static int retrieveConfigSetting(String propName, int defaultValue) {
String propValue = System.getProperty(propName);
if (propValue == null) {
return defaultValue;
}
int value = defaultValue;
try {
value = Integer.parseInt(propValue);
} catch (NumberFormatException ex) {
if (LOG.isWarnEnabled()) {
LOG.warn("Ignoring Invalid number({}) set in system property({}). Using default ({})",
propValue, propName, defaultValue);
}
}
return value;
}
public static GenericObjectPoolConfig setupPoolConfig() {
// setup config vars for the object pool
// ie. min and max idle instances, and max total instances of arbitrary objects
GenericObjectPoolConfig config = new GenericObjectPoolConfig();
// The maximum number of active connections that can be allocated from
// this pool at the same time, or negative for no limit. Default: 8
config.setMaxTotal(retrieveConfigSetting(ATHENZ_PROP_DBPOOL_MAX_TOTAL,
GenericObjectPoolConfig.DEFAULT_MAX_TOTAL));
if (config.getMaxTotal() == 0) {
config.setMaxTotal(-1); // -1 means no limit
}
// The maximum number of connections that can remain idle in the pool,
// without extra ones being released, or negative for no limit. Default 8
config.setMaxIdle(retrieveConfigSetting(ATHENZ_PROP_DBPOOL_MAX_IDLE,
GenericObjectPoolConfig.DEFAULT_MAX_IDLE));
if (config.getMaxIdle() == 0) {
config.setMaxIdle(-1); // -1 means no limit
}
// The minimum number of connections that can remain idle in the pool,
// without extra ones being created, or zero to create none. Default 0
config.setMinIdle(retrieveConfigSetting(ATHENZ_PROP_DBPOOL_MIN_IDLE,
GenericObjectPoolConfig.DEFAULT_MIN_IDLE));
// The maximum number of milliseconds that the pool will wait (when
// there are no available connections) for a connection to be returned
// before throwing an exception, or -1 to wait indefinitely. Default -1
config.setMaxWaitMillis(retrieveConfigSetting(ATHENZ_PROP_DBPOOL_MAX_WAIT,
GenericObjectPoolConfig.DEFAULT_MAX_WAIT_MILLIS));
// setup the configuration to cleanup idle connections
//
// Minimum time an object can be idle in the pool before being eligible
// for eviction by the idle object evictor.
// The default value is 30 minutes (1000 * 60 * 30).
config.setMinEvictableIdleTimeMillis(retrieveConfigSetting(ATHENZ_PROP_DBPOOL_EVICT_IDLE_TIMEOUT,
BaseObjectPoolConfig.DEFAULT_MIN_EVICTABLE_IDLE_TIME_MILLIS));
// Number of milliseconds to sleep between runs of idle object evictor thread.
// Not using DEFAULT_TIME_BETWEEN_EVICTION_RUNS_MILLIS since it is -1
// meaning it will not run the evictor thread and instead we're using
// the default min value for evictable idle connections (Default 30 minutes)
config.setTimeBetweenEvictionRunsMillis(retrieveConfigSetting(ATHENZ_PROP_DBPOOL_EVICT_IDLE_INTERVAL,
BaseObjectPoolConfig.DEFAULT_MIN_EVICTABLE_IDLE_TIME_MILLIS));
if (LOG.isDebugEnabled()) {
LOG.debug("Config settings for idle object eviction: " +
"time interval between eviction thread runs (" +
config.getTimeBetweenEvictionRunsMillis() +
" millis): minimum timeout for idle objects (" +
config.getMinEvictableIdleTimeMillis() + " millis)");
}
// Validate objects by the idle object evictor. If invalid, gets dropped
// from the pool.
config.setTestWhileIdle(true);
// Validate object before borrowing from pool and returning to the pool.
// If invalid, gets dropped from the pool and an attempt to borrow
// another one will occur.
config.setTestOnBorrow(true);
config.setTestOnReturn(true);
return config;
}
static PoolableDataSource create(ConnectionFactory connectionFactory) {
// setup our pool config object
GenericObjectPoolConfig config = setupPoolConfig();
PoolableConnectionFactory poolableConnectionFactory =
new PoolableConnectionFactory(connectionFactory, null);
// Set max lifetime of a connection in milli-secs, after which it will
// always fail activation, passivation, and validation.
// Value of -1 means infinite life time. The default value
// defined in this class is 10 minutes.
long connTtlMillis = retrieveConfigSetting(ATHENZ_PROP_DBPOOL_MAX_TTL, MAX_TTL_CONN_MS);
poolableConnectionFactory.setMaxConnLifetimeMillis(connTtlMillis);
if (LOG.isInfoEnabled()) {
LOG.info("Setting Time-To-Live interval for live connections ({}) msecs", connTtlMillis);
}
// set the validation query for our jdbc connector
final String validationQuery = System.getProperty(ATHENZ_PROP_DBPOOL_VALIDATION_QUERY, MYSQL_VALIDATION_QUERY);
poolableConnectionFactory.setValidationQuery(validationQuery);
ObjectPool<PoolableConnection> connectionPool =
new GenericObjectPool<>(poolableConnectionFactory, config);
poolableConnectionFactory.setPool(connectionPool);
return new AthenzDataSource(connectionPool);
}
}
| 1 | 4,820 | since we're changing this class, let's fix the default class name as well. it should be com.mysql.cj.jdbc.Driver | AthenZ-athenz | java |
@@ -3,4 +3,13 @@ class Role < ActiveRecord::Base
has_many :proposals, through: :proposal_roles
has_many :user_roles
has_many :users, through: :user_roles
+
+ # Safely ensure that the system roles in `config/initializers/01_c2.rb`
+ # exist in the database. Rails will run this code when it loads the
+ # `Role` class at bootup.
+ def self.ensure_system_roles_exist
+ SYSTEM_ROLES.each { |r| Role.find_or_create_by!(name: r) }
+ end
+
+ ensure_system_roles_exist if connection.table_exists? "roles"
end | 1 | class Role < ActiveRecord::Base
has_many :proposal_roles
has_many :proposals, through: :proposal_roles
has_many :user_roles
has_many :users, through: :user_roles
end
| 1 | 17,638 | At load time, we ensure the config'd objects exist. | 18F-C2 | rb |
@@ -153,6 +153,10 @@ namespace Microsoft.AspNetCore.Server.Kestrel.Core
{
throw new InvalidOperationException($"HTTPS endpoints can only be configured using {nameof(KestrelServerOptions)}.{nameof(KestrelServerOptions.Listen)}().");
}
+ else if (!parsedAddress.Scheme.Equals("http", StringComparison.OrdinalIgnoreCase))
+ {
+ throw new InvalidOperationException($"Unrecognized scheme in server address '{address}'. Only 'http://' is supported.");
+ }
if (!string.IsNullOrEmpty(parsedAddress.PathBase))
{ | 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Net;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Builder;
using Microsoft.AspNetCore.Hosting.Server;
using Microsoft.AspNetCore.Hosting.Server.Features;
using Microsoft.AspNetCore.Http.Features;
using Microsoft.AspNetCore.Server.Kestrel.Core.Internal;
using Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Http;
using Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Infrastructure;
using Microsoft.AspNetCore.Server.Kestrel.Transport.Abstractions;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
namespace Microsoft.AspNetCore.Server.Kestrel.Core
{
public class KestrelServer : IServer
{
private readonly List<ITransport> _transports = new List<ITransport>();
private readonly ILogger _logger;
private readonly IServerAddressesFeature _serverAddresses;
private readonly ITransportFactory _transportFactory;
private bool _isRunning;
private int _stopped;
private Heartbeat _heartbeat;
public KestrelServer(
IOptions<KestrelServerOptions> options,
ITransportFactory transportFactory,
ILoggerFactory loggerFactory)
{
if (options == null)
{
throw new ArgumentNullException(nameof(options));
}
if (transportFactory == null)
{
throw new ArgumentNullException(nameof(transportFactory));
}
if (loggerFactory == null)
{
throw new ArgumentNullException(nameof(loggerFactory));
}
Options = options.Value ?? new KestrelServerOptions();
InternalOptions = new InternalKestrelServerOptions();
_transportFactory = transportFactory;
_logger = loggerFactory.CreateLogger("Microsoft.AspNetCore.Server.Kestrel");
Features = new FeatureCollection();
_serverAddresses = new ServerAddressesFeature();
Features.Set(_serverAddresses);
Features.Set(InternalOptions);
}
public IFeatureCollection Features { get; }
public KestrelServerOptions Options { get; }
private InternalKestrelServerOptions InternalOptions { get; }
public async Task StartAsync<TContext>(IHttpApplication<TContext> application, CancellationToken cancellationToken)
{
try
{
if (!BitConverter.IsLittleEndian)
{
throw new PlatformNotSupportedException("Kestrel does not support big-endian architectures.");
}
ValidateOptions();
if (_isRunning)
{
// The server has already started and/or has not been cleaned up yet
throw new InvalidOperationException("Server has already started.");
}
_isRunning = true;
var trace = new KestrelTrace(_logger);
var systemClock = new SystemClock();
var dateHeaderValueManager = new DateHeaderValueManager(systemClock);
var connectionManager = new FrameConnectionManager();
_heartbeat = new Heartbeat(new IHeartbeatHandler[] { dateHeaderValueManager, connectionManager }, systemClock, trace);
IThreadPool threadPool;
if (InternalOptions.ThreadPoolDispatching)
{
threadPool = new LoggingThreadPool(trace);
}
else
{
threadPool = new InlineLoggingThreadPool(trace);
}
var serviceContext = new ServiceContext
{
Log = trace,
HttpParserFactory = frameParser => new HttpParser<FrameAdapter>(frameParser.Frame.ServiceContext.Log),
ThreadPool = threadPool,
SystemClock = systemClock,
DateHeaderValueManager = dateHeaderValueManager,
ConnectionManager = connectionManager,
ServerOptions = Options
};
var listenOptions = Options.ListenOptions;
var hasListenOptions = listenOptions.Any();
var hasServerAddresses = _serverAddresses.Addresses.Any();
if (hasListenOptions && hasServerAddresses)
{
var joined = string.Join(", ", _serverAddresses.Addresses);
_logger.LogWarning($"Overriding address(es) '{joined}'. Binding to endpoints defined in UseKestrel() instead.");
_serverAddresses.Addresses.Clear();
}
else if (!hasListenOptions && !hasServerAddresses)
{
_logger.LogDebug($"No listening endpoints were configured. Binding to {Constants.DefaultServerAddress} by default.");
// "localhost" for both IPv4 and IPv6 can't be represented as an IPEndPoint.
await StartLocalhostAsync(ServerAddress.FromUrl(Constants.DefaultServerAddress), serviceContext, application, cancellationToken).ConfigureAwait(false);
// If StartLocalhost doesn't throw, there is at least one listener.
// The port cannot change for "localhost".
_serverAddresses.Addresses.Add(Constants.DefaultServerAddress);
return;
}
else if (!hasListenOptions)
{
// If no endpoints are configured directly using KestrelServerOptions, use those configured via the IServerAddressesFeature.
var copiedAddresses = _serverAddresses.Addresses.ToArray();
_serverAddresses.Addresses.Clear();
foreach (var address in copiedAddresses)
{
var parsedAddress = ServerAddress.FromUrl(address);
if (parsedAddress.Scheme.Equals("https", StringComparison.OrdinalIgnoreCase))
{
throw new InvalidOperationException($"HTTPS endpoints can only be configured using {nameof(KestrelServerOptions)}.{nameof(KestrelServerOptions.Listen)}().");
}
if (!string.IsNullOrEmpty(parsedAddress.PathBase))
{
throw new InvalidOperationException($"A path base can only be configured using {nameof(IApplicationBuilder)}.UsePathBase().");
}
if (!string.IsNullOrEmpty(parsedAddress.PathBase))
{
_logger.LogWarning($"Path base in address {address} is not supported and will be ignored. To specify a path base, use {nameof(IApplicationBuilder)}.UsePathBase().");
}
if (parsedAddress.IsUnixPipe)
{
listenOptions.Add(new ListenOptions(parsedAddress.UnixPipePath)
{
Scheme = parsedAddress.Scheme,
});
}
else
{
if (string.Equals(parsedAddress.Host, "localhost", StringComparison.OrdinalIgnoreCase))
{
// "localhost" for both IPv4 and IPv6 can't be represented as an IPEndPoint.
await StartLocalhostAsync(parsedAddress, serviceContext, application, cancellationToken).ConfigureAwait(false);
// If StartLocalhost doesn't throw, there is at least one listener.
// The port cannot change for "localhost".
_serverAddresses.Addresses.Add(parsedAddress.ToString());
}
else
{
// These endPoints will be added later to _serverAddresses.Addresses
listenOptions.Add(new ListenOptions(CreateIPEndPoint(parsedAddress))
{
Scheme = parsedAddress.Scheme,
});
}
}
}
}
foreach (var endPoint in listenOptions)
{
var connectionHandler = new ConnectionHandler<TContext>(endPoint, serviceContext, application);
var transport = _transportFactory.Create(endPoint, connectionHandler);
_transports.Add(transport);
try
{
await transport.BindAsync().ConfigureAwait(false);
}
catch (AddressInUseException ex)
{
throw new IOException($"Failed to bind to address {endPoint}: address already in use.", ex);
}
// If requested port was "0", replace with assigned dynamic port.
_serverAddresses.Addresses.Add(endPoint.ToString());
}
}
catch (Exception ex)
{
_logger.LogCritical(0, ex, "Unable to start Kestrel.");
Dispose();
throw;
}
}
// Graceful shutdown if possible
public async Task StopAsync(CancellationToken cancellationToken)
{
if (Interlocked.Exchange(ref _stopped, 1) == 1)
{
return;
}
if (_transports != null)
{
var tasks = new Task[_transports.Count];
for (int i = 0; i < _transports.Count; i++)
{
tasks[i] = _transports[i].UnbindAsync();
}
await Task.WhenAll(tasks).ConfigureAwait(false);
// TODO: Do transport-agnostic connection management/shutdown.
for (int i = 0; i < _transports.Count; i++)
{
tasks[i] = _transports[i].StopAsync();
}
await Task.WhenAll(tasks).ConfigureAwait(false);
}
_heartbeat?.Dispose();
}
// Ungraceful shutdown
public void Dispose()
{
var cancelledTokenSource = new CancellationTokenSource();
cancelledTokenSource.Cancel();
StopAsync(cancelledTokenSource.Token).GetAwaiter().GetResult();
}
private void ValidateOptions()
{
if (Options.Limits.MaxRequestBufferSize.HasValue &&
Options.Limits.MaxRequestBufferSize < Options.Limits.MaxRequestLineSize)
{
throw new InvalidOperationException(
$"Maximum request buffer size ({Options.Limits.MaxRequestBufferSize.Value}) must be greater than or equal to maximum request line size ({Options.Limits.MaxRequestLineSize}).");
}
if (Options.Limits.MaxRequestBufferSize.HasValue &&
Options.Limits.MaxRequestBufferSize < Options.Limits.MaxRequestHeadersTotalSize)
{
throw new InvalidOperationException(
$"Maximum request buffer size ({Options.Limits.MaxRequestBufferSize.Value}) must be greater than or equal to maximum request headers size ({Options.Limits.MaxRequestHeadersTotalSize}).");
}
}
private async Task StartLocalhostAsync<TContext>(ServerAddress parsedAddress, ServiceContext serviceContext, IHttpApplication<TContext> application, CancellationToken cancellationToken)
{
if (parsedAddress.Port == 0)
{
throw new InvalidOperationException("Dynamic port binding is not supported when binding to localhost. You must either bind to 127.0.0.1:0 or [::1]:0, or both.");
}
var exceptions = new List<Exception>();
try
{
var ipv4ListenOptions = new ListenOptions(new IPEndPoint(IPAddress.Loopback, parsedAddress.Port))
{
Scheme = parsedAddress.Scheme,
};
var connectionHandler = new ConnectionHandler<TContext>(ipv4ListenOptions, serviceContext, application);
var transport = _transportFactory.Create(ipv4ListenOptions, connectionHandler);
_transports.Add(transport);
await transport.BindAsync();
}
catch (AddressInUseException ex)
{
throw new IOException($"Failed to bind to address {parsedAddress} on the IPv4 loopback interface: port already in use.", ex);
}
catch (Exception ex)
{
_logger.LogWarning(0, $"Unable to bind to {parsedAddress} on the IPv4 loopback interface: ({ex.Message})");
exceptions.Add(ex);
}
try
{
var ipv6ListenOptions = new ListenOptions(new IPEndPoint(IPAddress.IPv6Loopback, parsedAddress.Port))
{
Scheme = parsedAddress.Scheme,
};
var connectionHandler = new ConnectionHandler<TContext>(ipv6ListenOptions, serviceContext, application);
var transport = _transportFactory.Create(ipv6ListenOptions, connectionHandler);
_transports.Add(transport);
await transport.BindAsync();
}
catch (AddressInUseException ex)
{
throw new IOException($"Failed to bind to address {parsedAddress} on the IPv6 loopback interface: port already in use.", ex);
}
catch (Exception ex)
{
_logger.LogWarning(0, $"Unable to bind to {parsedAddress} on the IPv6 loopback interface: ({ex.Message})");
exceptions.Add(ex);
}
if (exceptions.Count == 2)
{
throw new IOException($"Failed to bind to address {parsedAddress}.", new AggregateException(exceptions));
}
}
/// <summary>
/// Returns an <see cref="IPEndPoint"/> for the given host an port.
/// If the host parameter isn't "localhost" or an IP address, use IPAddress.Any.
/// </summary>
internal static IPEndPoint CreateIPEndPoint(ServerAddress address)
{
IPAddress ip;
if (!IPAddress.TryParse(address.Host, out ip))
{
ip = IPAddress.IPv6Any;
}
return new IPEndPoint(ip, address.Port);
}
}
} | 1 | 12,766 | Is there a test for this case? | aspnet-KestrelHttpServer | .cs |
@@ -0,0 +1,14 @@
+<section class="topic">
+ <header>
+ <p class="topic-headline"><%= show.name %></p>
+ </header>
+
+ <%= render partial: "products/videos/video", collection: show.latest_videos %>
+
+ <figure class="card see-more">
+ <%= link_to show do %>
+ <h4><%= pluralize show.videos.count, "more #{show.name} episode" %></h4>
+ <p>View All</p>
+ <% end %>
+ </figure>
+</section> | 1 | 1 | 11,269 | This copy doesn't match what I had in my mockup. I think it's better to call them episodes. | thoughtbot-upcase | rb |
|
@@ -135,7 +135,11 @@ func (s *server) startService() common.Daemon {
servicePortMap := make(map[string]int)
for roleName, svcCfg := range s.cfg.Services {
serviceName := getServiceName(roleName)
- servicePortMap[serviceName] = svcCfg.RPC.Port
+ if serviceName == common.FrontendServiceName || serviceName == common.MatchingServiceName {
+ servicePortMap[serviceName] = svcCfg.RPC.GRPCPort
+ } else {
+ servicePortMap[serviceName] = svcCfg.RPC.Port
+ }
}
params.MembershipFactory, err = s.cfg.Ringpop.NewFactory( | 1 | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package temporal
import (
"log"
"time"
"google.golang.org/grpc"
persist "github.com/temporalio/temporal/.gen/go/persistenceblobs"
"github.com/temporalio/temporal/common/persistence"
persistenceClient "github.com/temporalio/temporal/common/persistence/client"
"github.com/temporalio/temporal/common/authorization"
"go.uber.org/zap"
"go.temporal.io/temporal-proto/workflowservice"
"github.com/temporalio/temporal/client"
"github.com/temporalio/temporal/common"
"github.com/temporalio/temporal/common/archiver"
"github.com/temporalio/temporal/common/archiver/provider"
"github.com/temporalio/temporal/common/cluster"
"github.com/temporalio/temporal/common/elasticsearch"
l "github.com/temporalio/temporal/common/log"
"github.com/temporalio/temporal/common/log/loggerimpl"
"github.com/temporalio/temporal/common/log/tag"
"github.com/temporalio/temporal/common/messaging"
"github.com/temporalio/temporal/common/metrics"
"github.com/temporalio/temporal/common/service"
"github.com/temporalio/temporal/common/service/config"
"github.com/temporalio/temporal/common/service/dynamicconfig"
"github.com/temporalio/temporal/service/frontend"
"github.com/temporalio/temporal/service/history"
"github.com/temporalio/temporal/service/matching"
"github.com/temporalio/temporal/service/worker"
)
type (
server struct {
name string
cfg *config.Config
doneC chan struct{}
daemon common.Daemon
}
)
const (
frontendService = "frontend"
historyService = "history"
matchingService = "matching"
workerService = "worker"
)
// newServer returns a new instance of a daemon
// that represents a cadence service
func newServer(service string, cfg *config.Config) common.Daemon {
return &server{
cfg: cfg,
name: service,
doneC: make(chan struct{}),
}
}
// Start starts the server
func (s *server) Start() {
if _, ok := s.cfg.Services[s.name]; !ok {
log.Fatalf("`%v` service missing config", s)
}
s.daemon = s.startService()
}
// Stop stops the server
func (s *server) Stop() {
if s.daemon == nil {
return
}
select {
case <-s.doneC:
default:
s.daemon.Stop()
select {
case <-s.doneC:
case <-time.After(time.Minute):
log.Printf("timed out waiting for server %v to exit\n", s.name)
}
}
}
// startService starts a service with the given name and config
func (s *server) startService() common.Daemon {
var err error
params := service.BootstrapParams{}
params.Name = getServiceName(s.name)
params.Logger = loggerimpl.NewLogger(s.cfg.Log.NewZapLogger())
params.PersistenceConfig = s.cfg.Persistence
params.DynamicConfig, err = dynamicconfig.NewFileBasedClient(&s.cfg.DynamicConfigClient, params.Logger.WithTags(tag.Service(params.Name)), s.doneC)
if err != nil {
log.Printf("error creating file based dynamic config client, use no-op config client instead. error: %v", err)
params.DynamicConfig = dynamicconfig.NewNopClient()
}
dc := dynamicconfig.NewCollection(params.DynamicConfig, params.Logger)
svcCfg := s.cfg.Services[s.name]
params.MetricScope = svcCfg.Metrics.NewScope(params.Logger)
params.RPCFactory = svcCfg.RPC.NewFactory(params.Name, params.Logger)
// Ringpop uses a different port to register handlers, this map is needed to resolve
// services to correct addresses used by clients through ServiceResolver lookup API
servicePortMap := make(map[string]int)
for roleName, svcCfg := range s.cfg.Services {
serviceName := getServiceName(roleName)
servicePortMap[serviceName] = svcCfg.RPC.Port
}
params.MembershipFactory, err = s.cfg.Ringpop.NewFactory(
params.RPCFactory.GetRingpopDispatcher(),
params.Name,
servicePortMap,
params.Logger,
)
if err != nil {
log.Fatalf("error creating ringpop factory: %v", err)
}
params.PProfInitializer = svcCfg.PProf.NewInitializer(params.Logger)
params.DCRedirectionPolicy = s.cfg.DCRedirectionPolicy
params.MetricsClient = metrics.NewClient(params.MetricScope, service.GetMetricsServiceIdx(params.Name, params.Logger))
clusterMetadata := s.cfg.ClusterMetadata
// This call performs a config check against the configured persistence store for immutable cluster metadata.
// If there is a mismatch, the persisted values take precedence and will be written over in the config objects.
// This is to keep this check hidden from independent downstream daemons and keep this in a single place.
immutableClusterMetadataInitialization(params.Logger, ¶ms.PersistenceConfig, ¶ms.MetricsClient, clusterMetadata)
params.ClusterMetadata = cluster.NewMetadata(
params.Logger,
dc.GetBoolProperty(dynamicconfig.EnableGlobalDomain, clusterMetadata.EnableGlobalDomain),
clusterMetadata.FailoverVersionIncrement,
clusterMetadata.MasterClusterName,
clusterMetadata.CurrentClusterName,
clusterMetadata.ClusterInformation,
clusterMetadata.ReplicationConsumer,
)
if s.cfg.PublicClient.HostPort != "" {
params.DispatcherProvider = client.NewDNSYarpcDispatcherProvider(params.Logger, s.cfg.PublicClient.RefreshInterval)
} else {
log.Fatalf("need to provide an endpoint config for PublicClient")
}
advancedVisMode := dc.GetStringProperty(
dynamicconfig.AdvancedVisibilityWritingMode,
common.GetDefaultAdvancedVisibilityWritingMode(params.PersistenceConfig.IsAdvancedVisibilityConfigExist()),
)()
isAdvancedVisEnabled := advancedVisMode != common.AdvancedVisibilityWritingModeOff
if params.ClusterMetadata.IsGlobalDomainEnabled() {
params.MessagingClient = messaging.NewKafkaClient(&s.cfg.Kafka, params.MetricsClient, zap.NewNop(), params.Logger, params.MetricScope, true, isAdvancedVisEnabled)
} else if isAdvancedVisEnabled {
params.MessagingClient = messaging.NewKafkaClient(&s.cfg.Kafka, params.MetricsClient, zap.NewNop(), params.Logger, params.MetricScope, false, isAdvancedVisEnabled)
} else {
params.MessagingClient = nil
}
if isAdvancedVisEnabled {
// verify config of advanced visibility store
advancedVisStoreKey := s.cfg.Persistence.AdvancedVisibilityStore
advancedVisStore, ok := s.cfg.Persistence.DataStores[advancedVisStoreKey]
if !ok {
log.Fatalf("not able to find advanced visibility store in config: %v", advancedVisStoreKey)
}
params.ESConfig = advancedVisStore.ElasticSearch
esClient, err := elasticsearch.NewClient(params.ESConfig)
if err != nil {
log.Fatalf("error creating elastic search client: %v", err)
}
params.ESClient = esClient
// verify index name
indexName, ok := params.ESConfig.Indices[common.VisibilityAppName]
if !ok || len(indexName) == 0 {
log.Fatalf("elastic search config missing visibility index")
}
}
connection, err := grpc.Dial(s.cfg.PublicClient.HostPort, grpc.WithInsecure())
if err != nil {
log.Fatalf("failed to construct connection: %v", err)
}
params.PublicClient = workflowservice.NewWorkflowServiceClient(connection)
params.ArchivalMetadata = archiver.NewArchivalMetadata(
dc,
s.cfg.Archival.History.Status,
s.cfg.Archival.History.EnableRead,
s.cfg.Archival.Visibility.Status,
s.cfg.Archival.Visibility.EnableRead,
&s.cfg.DomainDefaults.Archival,
)
params.ArchiverProvider = provider.NewArchiverProvider(s.cfg.Archival.History.Provider, s.cfg.Archival.Visibility.Provider)
params.PersistenceConfig.TransactionSizeLimit = dc.GetIntProperty(dynamicconfig.TransactionSizeLimit, common.DefaultTransactionSizeLimit)
params.Authorizer = authorization.NewNopAuthorizer()
params.Logger.Info("Starting service " + s.name)
var daemon common.Daemon
switch s.name {
case frontendService:
daemon, err = frontend.NewService(¶ms)
case historyService:
daemon, err = history.NewService(¶ms)
case matchingService:
daemon, err = matching.NewService(¶ms)
case workerService:
daemon, err = worker.NewService(¶ms)
}
if err != nil {
params.Logger.Fatal("Fail to start "+s.name+" service ", tag.Error(err))
}
go execute(daemon, s.doneC)
return daemon
}
func immutableClusterMetadataInitialization(
logger l.Logger,
persistenceConfig *config.Persistence,
metricsClient *metrics.Client,
clusterMetadata *config.ClusterMetadata) {
logger = logger.WithTags(tag.ComponentMetadataInitializer)
clusterMetadataManager, err := persistenceClient.NewFactory(
persistenceConfig,
clusterMetadata.CurrentClusterName,
*metricsClient,
logger,
).NewClusterMetadataManager()
if err != nil {
log.Fatalf("Error initializing cluster metadata manager: %v", err)
}
defer clusterMetadataManager.Close()
resp, err := clusterMetadataManager.InitializeImmutableClusterMetadata(
&persistence.InitializeImmutableClusterMetadataRequest{
ImmutableClusterMetadata: persist.ImmutableClusterMetadata{
HistoryShardCount: common.Int32Ptr(int32(persistenceConfig.NumHistoryShards)),
ClusterName: &clusterMetadata.CurrentClusterName,
}})
if err != nil {
log.Fatalf("Error while fetching or persisting immutable cluster metadata: %v", err)
}
if resp.RequestApplied {
logger.Info("Successfully applied immutable cluster metadata.")
} else {
if clusterMetadata.CurrentClusterName != *resp.PersistedImmutableData.ClusterName {
logImmutableMismatch(logger,
"ClusterMetadata.CurrentClusterName",
clusterMetadata.CurrentClusterName,
*resp.PersistedImmutableData.ClusterName)
clusterMetadata.CurrentClusterName = *resp.PersistedImmutableData.ClusterName
}
var persistedShardCount = int(*resp.PersistedImmutableData.HistoryShardCount)
if persistenceConfig.NumHistoryShards != persistedShardCount {
logImmutableMismatch(logger,
"Persistence.NumHistoryShards",
persistenceConfig.NumHistoryShards,
persistedShardCount)
persistenceConfig.NumHistoryShards = persistedShardCount
}
}
}
func logImmutableMismatch(l l.Logger, key string, ignored interface{}, value interface{}) {
l.Error(
"Supplied configuration key/value mismatches persisted ImmutableClusterMetadata."+
"Continuing with the persisted value as this value cannot be changed once initialized.",
tag.Key(key),
tag.IgnoredValue(ignored),
tag.Value(value))
}
// execute runs the daemon in a separate go routine
func execute(d common.Daemon, doneC chan struct{}) {
d.Start()
close(doneC)
}
// getServiceName converts the role name used in config to service name used by ringpop ring
func getServiceName(role string) string {
return "cadence-" + role
}
| 1 | 9,251 | For some reason it worked for frontend even before this change. | temporalio-temporal | go |
@@ -666,7 +666,10 @@ TEST (block_store, large_iteration)
{
auto transaction (store.tx_begin (true));
nano::account account;
- nano::random_pool.GenerateBlock (account.bytes.data (), account.bytes.size ());
+ {
+ std::lock_guard<std::mutex> lk (nano::random_pool_mutex);
+ nano::random_pool.GenerateBlock (account.bytes.data (), account.bytes.size ());
+ }
accounts1.insert (account);
store.account_put (transaction, account, nano::account_info ());
} | 1 | #include <gtest/gtest.h>
#include <nano/lib/utility.hpp>
#include <nano/node/common.hpp>
#include <nano/node/node.hpp>
#include <nano/secure/versioning.hpp>
#include <fstream>
TEST (block_store, construction)
{
nano::logging logging;
bool init (false);
nano::mdb_store store (init, logging, nano::unique_path ());
ASSERT_TRUE (!init);
auto now (nano::seconds_since_epoch ());
ASSERT_GT (now, 1408074640);
}
TEST (block_store, sideband_serialization)
{
nano::block_sideband sideband1;
sideband1.type = nano::block_type::receive;
sideband1.account = 1;
sideband1.balance = 2;
sideband1.height = 3;
sideband1.successor = 4;
sideband1.timestamp = 5;
std::vector<uint8_t> vector;
{
nano::vectorstream stream1 (vector);
sideband1.serialize (stream1);
}
nano::bufferstream stream2 (vector.data (), vector.size ());
nano::block_sideband sideband2;
sideband2.type = nano::block_type::receive;
ASSERT_FALSE (sideband2.deserialize (stream2));
ASSERT_EQ (sideband1.account, sideband2.account);
ASSERT_EQ (sideband1.balance, sideband2.balance);
ASSERT_EQ (sideband1.height, sideband2.height);
ASSERT_EQ (sideband1.successor, sideband2.successor);
ASSERT_EQ (sideband1.timestamp, sideband2.timestamp);
}
// Basic put/get/exists/del cycle for a single block: absent before put,
// retrievable and equal after put, absent again after delete.
TEST (block_store, add_item)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, nano::unique_path ());
	ASSERT_TRUE (!init);
	nano::open_block block (0, 1, 0, nano::keypair ().prv, 0, 0);
	nano::uint256_union hash1 (block.hash ());
	auto transaction (store.tx_begin (true));
	auto latest1 (store.block_get (transaction, hash1));
	ASSERT_EQ (nullptr, latest1);
	ASSERT_FALSE (store.block_exists (transaction, hash1));
	nano::block_sideband sideband (nano::block_type::open, 0, 0, 0, 0, 0);
	store.block_put (transaction, hash1, block, sideband);
	auto latest2 (store.block_get (transaction, hash1));
	ASSERT_NE (nullptr, latest2);
	ASSERT_EQ (block, *latest2);
	ASSERT_TRUE (store.block_exists (transaction, hash1));
	// A neighbouring hash must not spuriously exist.
	ASSERT_FALSE (store.block_exists (transaction, hash1.number () - 1));
	store.block_del (transaction, hash1);
	auto latest3 (store.block_get (transaction, hash1));
	ASSERT_EQ (nullptr, latest3);
}
// The successor field stored in a block's sideband can be set via block_put
// and reset to zero via block_successor_clear without touching the block itself.
TEST (block_store, clear_successor)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, nano::unique_path ());
	ASSERT_TRUE (!init);
	nano::open_block block1 (0, 1, 0, nano::keypair ().prv, 0, 0);
	auto transaction (store.tx_begin (true));
	nano::block_sideband sideband (nano::block_type::open, 0, 0, 0, 0, 0);
	store.block_put (transaction, block1.hash (), block1, sideband);
	nano::open_block block2 (0, 2, 0, nano::keypair ().prv, 0, 0);
	store.block_put (transaction, block2.hash (), block2, sideband);
	ASSERT_NE (nullptr, store.block_get (transaction, block1.hash (), &sideband));
	ASSERT_EQ (0, sideband.successor.number ());
	// Re-put block1 with block2 recorded as its successor.
	sideband.successor = block2.hash ();
	store.block_put (transaction, block1.hash (), block1, sideband);
	ASSERT_NE (nullptr, store.block_get (transaction, block1.hash (), &sideband));
	ASSERT_EQ (block2.hash (), sideband.successor);
	store.block_successor_clear (transaction, block1.hash ());
	ASSERT_NE (nullptr, store.block_get (transaction, block1.hash (), &sideband));
	ASSERT_EQ (0, sideband.successor.number ());
}
// A signed block round-trips through block_put/block_get unchanged.
TEST (block_store, add_nonempty_block)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, nano::unique_path ());
	ASSERT_TRUE (!init);
	nano::keypair key1;
	nano::open_block block (0, 1, 0, nano::keypair ().prv, 0, 0);
	nano::uint256_union hash1 (block.hash ());
	block.signature = nano::sign_message (key1.prv, key1.pub, hash1);
	auto transaction (store.tx_begin (true));
	auto latest1 (store.block_get (transaction, hash1));
	ASSERT_EQ (nullptr, latest1);
	nano::block_sideband sideband (nano::block_type::open, 0, 0, 0, 0, 0);
	store.block_put (transaction, hash1, block, sideband);
	auto latest2 (store.block_get (transaction, hash1));
	ASSERT_NE (nullptr, latest2);
	ASSERT_EQ (block, *latest2);
}
// Two distinct blocks stored under their own hashes are retrieved
// independently and do not compare equal to each other.
TEST (block_store, add_two_items)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, nano::unique_path ());
	ASSERT_TRUE (!init);
	nano::keypair key1;
	nano::open_block block (0, 1, 1, nano::keypair ().prv, 0, 0);
	nano::uint256_union hash1 (block.hash ());
	block.signature = nano::sign_message (key1.prv, key1.pub, hash1);
	auto transaction (store.tx_begin (true));
	auto latest1 (store.block_get (transaction, hash1));
	ASSERT_EQ (nullptr, latest1);
	nano::open_block block2 (0, 1, 3, nano::keypair ().prv, 0, 0);
	// Mutate the account after construction so block2 hashes differently.
	block2.hashables.account = 3;
	nano::uint256_union hash2 (block2.hash ());
	block2.signature = nano::sign_message (key1.prv, key1.pub, hash2);
	auto latest2 (store.block_get (transaction, hash2));
	ASSERT_EQ (nullptr, latest2);
	nano::block_sideband sideband (nano::block_type::open, 0, 0, 0, 0, 0);
	store.block_put (transaction, hash1, block, sideband);
	nano::block_sideband sideband2 (nano::block_type::open, 0, 0, 0, 0, 0);
	store.block_put (transaction, hash2, block2, sideband2);
	auto latest3 (store.block_get (transaction, hash1));
	ASSERT_NE (nullptr, latest3);
	ASSERT_EQ (block, *latest3);
	auto latest4 (store.block_get (transaction, hash2));
	ASSERT_NE (nullptr, latest4);
	ASSERT_EQ (block2, *latest4);
	ASSERT_FALSE (*latest3 == *latest4);
}
// A receive block referencing a previously stored open block round-trips
// through block_put/block_get.
TEST (block_store, add_receive)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, nano::unique_path ());
	ASSERT_TRUE (!init);
	nano::keypair key1;
	nano::keypair key2;
	nano::open_block block1 (0, 1, 0, nano::keypair ().prv, 0, 0);
	auto transaction (store.tx_begin (true));
	nano::block_sideband sideband1 (nano::block_type::open, 0, 0, 0, 0, 0);
	store.block_put (transaction, block1.hash (), block1, sideband1);
	nano::receive_block block (block1.hash (), 1, nano::keypair ().prv, 2, 3);
	nano::block_hash hash1 (block.hash ());
	auto latest1 (store.block_get (transaction, hash1));
	ASSERT_EQ (nullptr, latest1);
	nano::block_sideband sideband (nano::block_type::receive, 0, 0, 0, 0, 0);
	store.block_put (transaction, hash1, block, sideband);
	auto latest2 (store.block_get (transaction, hash1));
	ASSERT_NE (nullptr, latest2);
	ASSERT_EQ (block, *latest2);
}
// pending_put/pending_get/pending_del cycle for a single pending entry.
// pending_get returns true when the key is absent, false on success.
TEST (block_store, add_pending)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, nano::unique_path ());
	ASSERT_TRUE (!init);
	nano::keypair key1;
	nano::pending_key key2 (0, 0);
	nano::pending_info pending1;
	auto transaction (store.tx_begin (true));
	ASSERT_TRUE (store.pending_get (transaction, key2, pending1));
	store.pending_put (transaction, key2, pending1);
	nano::pending_info pending2;
	ASSERT_FALSE (store.pending_get (transaction, key2, pending2));
	ASSERT_EQ (pending1, pending2);
	store.pending_del (transaction, key2);
	ASSERT_TRUE (store.pending_get (transaction, key2, pending2));
}
// Iterating pending entries: an empty store iterates as begin == end; after
// one put, the iterator yields the stored key (account, hash) and value
// (source, amount, epoch).
TEST (block_store, pending_iterator)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, nano::unique_path ());
	ASSERT_TRUE (!init);
	auto transaction (store.tx_begin (true));
	ASSERT_EQ (store.pending_end (), store.pending_begin (transaction));
	store.pending_put (transaction, nano::pending_key (1, 2), { 2, 3, nano::epoch::epoch_1 });
	auto current (store.pending_begin (transaction));
	ASSERT_NE (store.pending_end (), current);
	nano::pending_key key1 (current->first);
	ASSERT_EQ (nano::account (1), key1.account);
	ASSERT_EQ (nano::block_hash (2), key1.hash);
	nano::pending_info pending (current->second);
	ASSERT_EQ (nano::account (2), pending.source);
	ASSERT_EQ (nano::amount (3), pending.amount);
	ASSERT_EQ (nano::epoch::epoch_1, pending.epoch);
}
/**
 * Regression test for Issue 1164.
 * This reconstructs the situation where a key is larger in pending than the account being iterated in pending_v1, leaving
 * iteration order up to the value, causing undefined behavior.
 * After the bugfix, the value is compared only if the keys are equal.
 */
TEST (block_store, pending_iterator_comparison)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, nano::unique_path ());
	ASSERT_TRUE (!init);
	nano::stat stats;
	auto transaction (store.tx_begin (true));
	// Populate pending (epoch_0 entries go to the pending table)
	store.pending_put (transaction, nano::pending_key (nano::account (3), nano::block_hash (1)), nano::pending_info (nano::account (10), nano::amount (1), nano::epoch::epoch_0));
	store.pending_put (transaction, nano::pending_key (nano::account (3), nano::block_hash (4)), nano::pending_info (nano::account (10), nano::amount (0), nano::epoch::epoch_0));
	// Populate pending_v1 (epoch_1 entries go to the pending_v1 table)
	store.pending_put (transaction, nano::pending_key (nano::account (2), nano::block_hash (2)), nano::pending_info (nano::account (10), nano::amount (2), nano::epoch::epoch_1));
	store.pending_put (transaction, nano::pending_key (nano::account (2), nano::block_hash (3)), nano::pending_info (nano::account (10), nano::amount (3), nano::epoch::epoch_1));
	// Iterate account 3 (pending): expect exactly its own two entries, in key order.
	{
		size_t count = 0;
		nano::account begin (3);
		nano::account end (begin.number () + 1);
		for (auto i (store.pending_begin (transaction, nano::pending_key (begin, 0))), n (store.pending_begin (transaction, nano::pending_key (end, 0))); i != n; ++i, ++count)
		{
			nano::pending_key key (i->first);
			ASSERT_EQ (key.account, begin);
			// More than 2 iterations would indicate the broken comparison.
			ASSERT_LT (count, 3);
		}
		ASSERT_EQ (count, 2);
	}
	// Iterate account 2 (pending_v1): same expectation on the other table.
	{
		size_t count = 0;
		nano::account begin (2);
		nano::account end (begin.number () + 1);
		for (auto i (store.pending_begin (transaction, nano::pending_key (begin, 0))), n (store.pending_begin (transaction, nano::pending_key (end, 0))); i != n; ++i, ++count)
		{
			nano::pending_key key (i->first);
			ASSERT_EQ (key.account, begin);
			ASSERT_LT (count, 3);
		}
		ASSERT_EQ (count, 2);
	}
}
// store.initialize seeds the ledger with the genesis block: the genesis
// account's head is the genesis hash, the stored block is an open block,
// and the modified timestamp is not in the future.
TEST (block_store, genesis)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, nano::unique_path ());
	ASSERT_TRUE (!init);
	nano::genesis genesis;
	auto hash (genesis.hash ());
	auto transaction (store.tx_begin (true));
	store.initialize (transaction, genesis);
	nano::account_info info;
	ASSERT_FALSE (store.account_get (transaction, nano::genesis_account, info));
	ASSERT_EQ (hash, info.head);
	auto block1 (store.block_get (transaction, info.head));
	ASSERT_NE (nullptr, block1);
	auto receive1 (dynamic_cast<nano::open_block *> (block1.get ()));
	ASSERT_NE (nullptr, receive1);
	ASSERT_LE (info.modified, nano::seconds_since_epoch ());
	// Exercise the string conversions; results are unused but must not throw.
	auto test_pub_text (nano::test_genesis_key.pub.to_string ());
	auto test_pub_account (nano::test_genesis_key.pub.to_account ());
	auto test_prv_text (nano::test_genesis_key.prv.data.to_string ());
	ASSERT_EQ (nano::genesis_account, nano::test_genesis_key.pub);
}
// representation_put overwrites: the latest value written for a key is the
// one representation_get returns; absent keys read as 0.
TEST (representation, changes)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, nano::unique_path ());
	ASSERT_TRUE (!init);
	nano::keypair key1;
	auto transaction (store.tx_begin (true));
	ASSERT_EQ (0, store.representation_get (transaction, key1.pub));
	store.representation_put (transaction, key1.pub, 1);
	ASSERT_EQ (1, store.representation_get (transaction, key1.pub));
	store.representation_put (transaction, key1.pub, 2);
	ASSERT_EQ (2, store.representation_get (transaction, key1.pub));
}
// unchecked_put/unchecked_get/unchecked_del cycle keyed by a block's
// previous hash: empty before put, one matching entry after, empty after del.
TEST (bootstrap, simple)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, nano::unique_path ());
	ASSERT_TRUE (!init);
	auto block1 (std::make_shared<nano::send_block> (0, 1, 2, nano::keypair ().prv, 4, 5));
	auto transaction (store.tx_begin (true));
	auto block2 (store.unchecked_get (transaction, block1->previous ()));
	ASSERT_TRUE (block2.empty ());
	store.unchecked_put (transaction, block1->previous (), block1);
	auto block3 (store.unchecked_get (transaction, block1->previous ()));
	ASSERT_FALSE (block3.empty ());
	ASSERT_EQ (*block1, *(block3[0].block));
	store.unchecked_del (transaction, nano::unchecked_key (block1->previous (), block1->hash ()));
	auto block4 (store.unchecked_get (transaction, block1->previous ()));
	ASSERT_TRUE (block4.empty ());
}
// The same block can be stored in unchecked under two different dependency
// keys (its previous and its source) and found under both.
TEST (unchecked, multiple)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, nano::unique_path ());
	ASSERT_TRUE (!init);
	auto block1 (std::make_shared<nano::send_block> (4, 1, 2, nano::keypair ().prv, 4, 5));
	auto transaction (store.tx_begin (true));
	auto block2 (store.unchecked_get (transaction, block1->previous ()));
	ASSERT_TRUE (block2.empty ());
	store.unchecked_put (transaction, block1->previous (), block1);
	store.unchecked_put (transaction, block1->source (), block1);
	auto block3 (store.unchecked_get (transaction, block1->previous ()));
	ASSERT_FALSE (block3.empty ());
	auto block4 (store.unchecked_get (transaction, block1->source ()));
	ASSERT_FALSE (block4.empty ());
}
// Putting the same (key, block) pair into unchecked twice must deduplicate:
// only one entry is returned.
TEST (unchecked, double_put)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, nano::unique_path ());
	ASSERT_TRUE (!init);
	auto block1 (std::make_shared<nano::send_block> (4, 1, 2, nano::keypair ().prv, 4, 5));
	auto transaction (store.tx_begin (true));
	auto block2 (store.unchecked_get (transaction, block1->previous ()));
	ASSERT_TRUE (block2.empty ());
	store.unchecked_put (transaction, block1->previous (), block1);
	store.unchecked_put (transaction, block1->previous (), block1);
	auto block3 (store.unchecked_get (transaction, block1->previous ()));
	ASSERT_EQ (block3.size (), 1);
}
// Multiple distinct blocks can share an unchecked dependency key;
// unchecked_get must return exactly the set of blocks stored under each key
// and unchecked_count must reflect the total number of (key, block) pairs.
TEST (unchecked, multiple_get)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, nano::unique_path ());
	ASSERT_TRUE (!init);
	auto block1 (std::make_shared<nano::send_block> (4, 1, 2, nano::keypair ().prv, 4, 5));
	auto block2 (std::make_shared<nano::send_block> (3, 1, 2, nano::keypair ().prv, 4, 5));
	auto block3 (std::make_shared<nano::send_block> (5, 1, 2, nano::keypair ().prv, 4, 5));
	{
		auto transaction (store.tx_begin (true));
		store.unchecked_put (transaction, block1->previous (), block1); // unchecked1
		store.unchecked_put (transaction, block1->hash (), block1); // unchecked2
		store.unchecked_put (transaction, block2->previous (), block2); // unchecked3
		store.unchecked_put (transaction, block1->previous (), block2); // unchecked1
		store.unchecked_put (transaction, block1->hash (), block2); // unchecked2
		store.unchecked_put (transaction, block3->previous (), block3);
		store.unchecked_put (transaction, block3->hash (), block3); // unchecked4
		store.unchecked_put (transaction, block1->previous (), block3); // unchecked1
	}
	auto transaction (store.tx_begin ());
	auto unchecked_count (store.unchecked_count (transaction));
	ASSERT_EQ (unchecked_count, 8);
	// Key block1->previous holds all three blocks.
	std::vector<nano::block_hash> unchecked1;
	auto unchecked1_blocks (store.unchecked_get (transaction, block1->previous ()));
	ASSERT_EQ (unchecked1_blocks.size (), 3);
	for (auto & i : unchecked1_blocks)
	{
		unchecked1.push_back (i.block->hash ());
	}
	ASSERT_TRUE (std::find (unchecked1.begin (), unchecked1.end (), block1->hash ()) != unchecked1.end ());
	ASSERT_TRUE (std::find (unchecked1.begin (), unchecked1.end (), block2->hash ()) != unchecked1.end ());
	ASSERT_TRUE (std::find (unchecked1.begin (), unchecked1.end (), block3->hash ()) != unchecked1.end ());
	// Key block1->hash holds block1 and block2.
	std::vector<nano::block_hash> unchecked2;
	auto unchecked2_blocks (store.unchecked_get (transaction, block1->hash ()));
	ASSERT_EQ (unchecked2_blocks.size (), 2);
	for (auto & i : unchecked2_blocks)
	{
		unchecked2.push_back (i.block->hash ());
	}
	ASSERT_TRUE (std::find (unchecked2.begin (), unchecked2.end (), block1->hash ()) != unchecked2.end ());
	ASSERT_TRUE (std::find (unchecked2.begin (), unchecked2.end (), block2->hash ()) != unchecked2.end ());
	auto unchecked3 (store.unchecked_get (transaction, block2->previous ()));
	ASSERT_EQ (unchecked3.size (), 1);
	ASSERT_EQ (unchecked3[0].block->hash (), block2->hash ());
	auto unchecked4 (store.unchecked_get (transaction, block3->hash ()));
	ASSERT_EQ (unchecked4.size (), 1);
	ASSERT_EQ (unchecked4[0].block->hash (), block3->hash ());
	// block2->hash was never used as a key, so nothing is stored under it.
	auto unchecked5 (store.unchecked_get (transaction, block2->hash ()));
	ASSERT_EQ (unchecked5.size (), 0);
}
// A fresh store has no accounts: latest iteration is immediately at end.
TEST (block_store, empty_accounts)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, nano::unique_path ());
	ASSERT_TRUE (!init);
	auto transaction (store.tx_begin ());
	auto begin (store.latest_begin (transaction));
	auto end (store.latest_end ());
	ASSERT_EQ (end, begin);
}
// block_exists returns true for a block just written with block_put.
TEST (block_store, one_block)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, nano::unique_path ());
	ASSERT_TRUE (!init);
	nano::open_block block1 (0, 1, 0, nano::keypair ().prv, 0, 0);
	auto transaction (store.tx_begin (true));
	nano::block_sideband sideband (nano::block_type::open, 0, 0, 0, 0, 0);
	store.block_put (transaction, block1.hash (), block1, sideband);
	ASSERT_TRUE (store.block_exists (transaction, block1.hash ()));
}
// A fresh store has no unchecked entries: iteration is immediately at end.
TEST (block_store, empty_bootstrap)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, nano::unique_path ());
	ASSERT_TRUE (!init);
	auto transaction (store.tx_begin ());
	auto begin (store.unchecked_begin (transaction));
	auto end (store.unchecked_end ());
	ASSERT_EQ (end, begin);
}
// One unchecked entry is visible via iteration after a flush; its key and
// block round-trip, and advancing the iterator reaches end.
TEST (block_store, one_bootstrap)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, nano::unique_path ());
	ASSERT_TRUE (!init);
	auto block1 (std::make_shared<nano::send_block> (0, 1, 2, nano::keypair ().prv, 4, 5));
	auto transaction (store.tx_begin (true));
	store.unchecked_put (transaction, block1->hash (), block1);
	// unchecked_put buffers in memory; flush persists to the database.
	store.flush (transaction);
	auto begin (store.unchecked_begin (transaction));
	auto end (store.unchecked_end ());
	ASSERT_NE (end, begin);
	nano::uint256_union hash1 (begin->first.key ());
	ASSERT_EQ (block1->hash (), hash1);
	auto blocks (store.unchecked_get (transaction, hash1));
	ASSERT_EQ (1, blocks.size ());
	auto block2 (blocks[0].block);
	ASSERT_EQ (*block1, *block2);
	++begin;
	ASSERT_EQ (end, begin);
}
// NOTE(review): this test constructs two blocks but performs no store
// operations and has no assertions — it appears to be an unfinished stub.
// Consider either implementing the unchecked_begin search coverage it was
// meant to provide or removing it.
TEST (block_store, unchecked_begin_search)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, nano::unique_path ());
	ASSERT_TRUE (!init);
	nano::keypair key0;
	nano::send_block block1 (0, 1, 2, key0.prv, key0.pub, 3);
	nano::send_block block2 (5, 6, 7, key0.prv, key0.pub, 8);
}
// account_put followed by account_get returns an equal account_info.
TEST (block_store, frontier_retrieval)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, nano::unique_path ());
	ASSERT_TRUE (!init);
	nano::account account1 (0);
	nano::account_info info1 (0, 0, 0, 0, 0, 0, nano::epoch::epoch_0);
	auto transaction (store.tx_begin (true));
	store.account_put (transaction, account1, info1);
	nano::account_info info2;
	store.account_get (transaction, account1, info2);
	ASSERT_EQ (info1, info2);
}
// Iterating the accounts table with a single stored account yields that
// account with the exact head/balance/modified/block_count values written.
TEST (block_store, one_account)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, nano::unique_path ());
	ASSERT_TRUE (!init);
	nano::account account (0);
	nano::block_hash hash (0);
	auto transaction (store.tx_begin (true));
	store.account_put (transaction, account, { hash, account, hash, 42, 100, 200, nano::epoch::epoch_0 });
	auto begin (store.latest_begin (transaction));
	auto end (store.latest_end ());
	ASSERT_NE (end, begin);
	ASSERT_EQ (account, nano::account (begin->first));
	nano::account_info info (begin->second);
	ASSERT_EQ (hash, info.head);
	ASSERT_EQ (42, info.balance.number ());
	ASSERT_EQ (100, info.modified);
	ASSERT_EQ (200, info.block_count);
	++begin;
	ASSERT_EQ (end, begin);
}
// Two blocks stored in the same transaction both exist afterwards.
TEST (block_store, two_block)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, nano::unique_path ());
	ASSERT_TRUE (!init);
	nano::open_block block1 (0, 1, 1, nano::keypair ().prv, 0, 0);
	// Mutate the account so block1 hashes differently from block2.
	block1.hashables.account = 1;
	std::vector<nano::block_hash> hashes;
	std::vector<nano::open_block> blocks;
	hashes.push_back (block1.hash ());
	blocks.push_back (block1);
	auto transaction (store.tx_begin (true));
	nano::block_sideband sideband1 (nano::block_type::open, 0, 0, 0, 0, 0);
	store.block_put (transaction, hashes[0], block1, sideband1);
	nano::open_block block2 (0, 1, 2, nano::keypair ().prv, 0, 0);
	hashes.push_back (block2.hash ());
	blocks.push_back (block2);
	nano::block_sideband sideband2 (nano::block_type::open, 0, 0, 0, 0, 0);
	store.block_put (transaction, hashes[1], block2, sideband2);
	ASSERT_TRUE (store.block_exists (transaction, block1.hash ()));
	ASSERT_TRUE (store.block_exists (transaction, block2.hash ()));
}
// Two accounts iterate back in ascending key order with the values written.
TEST (block_store, two_account)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, nano::unique_path ());
	ASSERT_TRUE (!init);
	// NOTE(review): stop() is called before the store is used; presumably this
	// halts background work so the test runs deterministically — confirm.
	store.stop ();
	nano::account account1 (1);
	nano::block_hash hash1 (2);
	nano::account account2 (3);
	nano::block_hash hash2 (4);
	auto transaction (store.tx_begin (true));
	store.account_put (transaction, account1, { hash1, account1, hash1, 42, 100, 300, nano::epoch::epoch_0 });
	store.account_put (transaction, account2, { hash2, account2, hash2, 84, 200, 400, nano::epoch::epoch_0 });
	auto begin (store.latest_begin (transaction));
	auto end (store.latest_end ());
	ASSERT_NE (end, begin);
	ASSERT_EQ (account1, nano::account (begin->first));
	nano::account_info info1 (begin->second);
	ASSERT_EQ (hash1, info1.head);
	ASSERT_EQ (42, info1.balance.number ());
	ASSERT_EQ (100, info1.modified);
	ASSERT_EQ (300, info1.block_count);
	++begin;
	ASSERT_NE (end, begin);
	ASSERT_EQ (account2, nano::account (begin->first));
	nano::account_info info2 (begin->second);
	ASSERT_EQ (hash2, info2.head);
	ASSERT_EQ (84, info2.balance.number ());
	ASSERT_EQ (200, info2.modified);
	ASSERT_EQ (400, info2.block_count);
	++begin;
	ASSERT_EQ (end, begin);
}
// latest_begin with a search key returns the first account >= the key:
// exact matches land on that account, a gap key lands on the next one.
TEST (block_store, latest_find)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, nano::unique_path ());
	ASSERT_TRUE (!init);
	store.stop ();
	nano::account account1 (1);
	nano::block_hash hash1 (2);
	nano::account account2 (3);
	nano::block_hash hash2 (4);
	auto transaction (store.tx_begin (true));
	store.account_put (transaction, account1, { hash1, account1, hash1, 100, 0, 300, nano::epoch::epoch_0 });
	store.account_put (transaction, account2, { hash2, account2, hash2, 200, 0, 400, nano::epoch::epoch_0 });
	auto first (store.latest_begin (transaction));
	auto second (store.latest_begin (transaction));
	++second;
	auto find1 (store.latest_begin (transaction, 1));
	ASSERT_EQ (first, find1);
	auto find2 (store.latest_begin (transaction, 3));
	ASSERT_EQ (second, find2);
	// Account 2 does not exist; the search lands on the next account (3).
	auto find3 (store.latest_begin (transaction, 2));
	ASSERT_EQ (second, find3);
}
// Opening the store on an invalid filesystem path must report failure via init.
TEST (block_store, bad_path)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, boost::filesystem::path ("///"));
	ASSERT_TRUE (init);
}
// Disabled: LMDB allows the file to be shared, so opening an already-open
// path does not fail as this test expects.
TEST (block_store, DISABLED_already_open) // File can be shared
{
	auto path (nano::unique_path ());
	boost::filesystem::create_directories (path.parent_path ());
	nano::set_secure_perm_directory (path.parent_path ());
	std::ofstream file;
	file.open (path.string ().c_str ());
	ASSERT_TRUE (file.is_open ());
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, path);
	ASSERT_TRUE (init);
}
// root() of each block type: send/change/receive root on the previous hash,
// open blocks root on the account.
TEST (block_store, roots)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, nano::unique_path ());
	ASSERT_TRUE (!init);
	nano::send_block send_block (0, 1, 2, nano::keypair ().prv, 4, 5);
	ASSERT_EQ (send_block.hashables.previous, send_block.root ());
	nano::change_block change_block (0, 1, nano::keypair ().prv, 3, 4);
	ASSERT_EQ (change_block.hashables.previous, change_block.root ());
	nano::receive_block receive_block (0, 1, nano::keypair ().prv, 3, 4);
	ASSERT_EQ (receive_block.hashables.previous, receive_block.root ());
	nano::open_block open_block (0, 1, 2, nano::keypair ().prv, 4, 5);
	ASSERT_EQ (open_block.hashables.account, open_block.root ());
}
// pending_exists must not report a key that was never put, even when a
// neighbouring key is present.
TEST (block_store, pending_exists)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, nano::unique_path ());
	ASSERT_TRUE (!init);
	nano::pending_key two (2, 0);
	nano::pending_info pending;
	auto transaction (store.tx_begin (true));
	store.pending_put (transaction, two, pending);
	nano::pending_key one (1, 0);
	ASSERT_FALSE (store.pending_exists (transaction, one));
}
// account_exists must not report an account that was never stored.
// NOTE(review): block_hash values are passed where accounts are expected;
// both are uint256_union aliases so this compiles, but nano::account would
// express the intent better.
TEST (block_store, latest_exists)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, nano::unique_path ());
	ASSERT_TRUE (!init);
	nano::block_hash two (2);
	nano::account_info info;
	auto transaction (store.tx_begin (true));
	store.account_put (transaction, two, info);
	nano::block_hash one (1);
	ASSERT_FALSE (store.account_exists (transaction, one));
}
// Inserts 1000 random accounts (one write transaction each) and verifies a
// full iteration visits exactly that set, in strictly ascending key order.
TEST (block_store, large_iteration)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, nano::unique_path ());
	ASSERT_TRUE (!init);
	std::unordered_set<nano::account> accounts1;
	for (auto i (0); i < 1000; ++i)
	{
		auto transaction (store.tx_begin (true));
		nano::account account;
		nano::random_pool.GenerateBlock (account.bytes.data (), account.bytes.size ());
		accounts1.insert (account);
		store.account_put (transaction, account, nano::account_info ());
	}
	std::unordered_set<nano::account> accounts2;
	nano::account previous (0);
	auto transaction (store.tx_begin ());
	for (auto i (store.latest_begin (transaction, 0)), n (store.latest_end ()); i != n; ++i)
	{
		nano::account current (i->first);
		// Iteration must be in strictly increasing key order.
		assert (current.number () > previous.number ());
		accounts2.insert (current);
		previous = current;
	}
	ASSERT_EQ (accounts1, accounts2);
}
// frontier_put/frontier_get/frontier_del cycle; absent frontiers read as zero.
TEST (block_store, frontier)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, nano::unique_path ());
	ASSERT_TRUE (!init);
	auto transaction (store.tx_begin (true));
	nano::block_hash hash (100);
	nano::account account (200);
	ASSERT_TRUE (store.frontier_get (transaction, hash).is_zero ());
	store.frontier_put (transaction, hash, account);
	ASSERT_EQ (account, store.frontier_get (transaction, hash));
	store.frontier_del (transaction, hash);
	ASSERT_TRUE (store.frontier_get (transaction, hash).is_zero ());
}
// A second block_put under the same key replaces the first entry; the two
// blocks differ only in work, so the stored work value identifies the winner.
TEST (block_store, block_replace)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, nano::unique_path ());
	ASSERT_TRUE (!init);
	nano::send_block send1 (0, 0, 0, nano::keypair ().prv, 0, 1);
	nano::send_block send2 (0, 0, 0, nano::keypair ().prv, 0, 2);
	auto transaction (store.tx_begin (true));
	nano::block_sideband sideband1 (nano::block_type::send, 0, 0, 0, 0, 0);
	store.block_put (transaction, 0, send1, sideband1);
	nano::block_sideband sideband2 (nano::block_type::send, 0, 0, 0, 0, 0);
	store.block_put (transaction, 0, send2, sideband2);
	auto block3 (store.block_get (transaction, 0));
	ASSERT_NE (nullptr, block3);
	// Work 2 identifies send2 as the surviving entry.
	ASSERT_EQ (2, block3->block_work ());
}
// block_count sums to 0 on a fresh store and to 1 after storing one block.
TEST (block_store, block_count)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, nano::unique_path ());
	ASSERT_TRUE (!init);
	auto transaction (store.tx_begin (true));
	ASSERT_EQ (0, store.block_count (transaction).sum ());
	nano::open_block block (0, 1, 0, nano::keypair ().prv, 0, 0);
	nano::uint256_union hash1 (block.hash ());
	nano::block_sideband sideband (nano::block_type::open, 0, 0, 0, 0, 0);
	store.block_put (transaction, hash1, block, sideband);
	ASSERT_EQ (1, store.block_count (transaction).sum ());
}
// account_count is 0 on a fresh store and 1 after one account_put.
TEST (block_store, account_count)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, nano::unique_path ());
	ASSERT_TRUE (!init);
	auto transaction (store.tx_begin (true));
	ASSERT_EQ (0, store.account_count (transaction));
	nano::account account (200);
	store.account_put (transaction, account, nano::account_info ());
	ASSERT_EQ (1, store.account_count (transaction));
}
// Vote sequence numbers increment per key independently, and vote_max
// records externally bumped sequence numbers that subsequent
// vote_generate calls continue from.
TEST (block_store, sequence_increment)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, nano::unique_path ());
	ASSERT_TRUE (!init);
	nano::keypair key1;
	nano::keypair key2;
	auto block1 (std::make_shared<nano::open_block> (0, 1, 0, nano::keypair ().prv, 0, 0));
	auto transaction (store.tx_begin (true));
	auto vote1 (store.vote_generate (transaction, key1.pub, key1.prv, block1));
	ASSERT_EQ (1, vote1->sequence);
	auto vote2 (store.vote_generate (transaction, key1.pub, key1.prv, block1));
	ASSERT_EQ (2, vote2->sequence);
	// A different key starts its own sequence at 1.
	auto vote3 (store.vote_generate (transaction, key2.pub, key2.prv, block1));
	ASSERT_EQ (1, vote3->sequence);
	auto vote4 (store.vote_generate (transaction, key2.pub, key2.prv, block1));
	ASSERT_EQ (2, vote4->sequence);
	// Manually bump the sequences and record them via vote_max.
	vote1->sequence = 20;
	auto seq5 (store.vote_max (transaction, vote1));
	ASSERT_EQ (20, seq5->sequence);
	vote3->sequence = 30;
	auto seq6 (store.vote_max (transaction, vote3));
	ASSERT_EQ (30, seq6->sequence);
	// vote_generate continues from the recorded maxima.
	auto vote5 (store.vote_generate (transaction, key1.pub, key1.prv, block1));
	ASSERT_EQ (21, vote5->sequence);
	auto vote6 (store.vote_generate (transaction, key2.pub, key2.prv, block1));
	ASSERT_EQ (31, vote6->sequence);
}
// Database upgrade v2 -> v3: writes a v2-era store (manual representation
// entries and a v5-format account record with a corrupted rep_block), then
// reopens and verifies the upgrade rebuilt representation weights and
// restored the correct rep_block from the ledger.
TEST (block_store, upgrade_v2_v3)
{
	nano::keypair key1;
	nano::keypair key2;
	nano::block_hash change_hash;
	auto path (nano::unique_path ());
	{
		nano::logging logging;
		bool init (false);
		nano::mdb_store store (init, logging, path);
		ASSERT_TRUE (!init);
		store.stop ();
		auto transaction (store.tx_begin (true));
		nano::genesis genesis;
		auto hash (genesis.hash ());
		store.initialize (transaction, genesis);
		nano::stat stats;
		nano::ledger ledger (store, stats);
		nano::change_block change (hash, key1.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, 0);
		change_hash = change.hash ();
		ASSERT_EQ (nano::process_result::progress, ledger.process (transaction, change).code);
		ASSERT_EQ (0, ledger.weight (transaction, nano::test_genesis_key.pub));
		ASSERT_EQ (nano::genesis_amount, ledger.weight (transaction, key1.pub));
		// Mark the database as version 2 and plant bogus representation values
		// that the upgrade must recompute.
		store.version_put (transaction, 2);
		store.representation_put (transaction, key1.pub, 7);
		ASSERT_EQ (7, ledger.weight (transaction, key1.pub));
		ASSERT_EQ (2, store.version_get (transaction));
		store.representation_put (transaction, key2.pub, 6);
		ASSERT_EQ (6, ledger.weight (transaction, key2.pub));
		nano::account_info info;
		ASSERT_FALSE (store.account_get (transaction, nano::test_genesis_key.pub, info));
		// Corrupt rep_block; the upgrade must restore it from the change block.
		info.rep_block = 42;
		nano::account_info_v5 info_old (info.head, info.rep_block, info.open_block, info.balance, info.modified);
		auto status (mdb_put (store.env.tx (transaction), store.accounts_v0, nano::mdb_val (nano::test_genesis_key.pub), info_old.val (), 0));
		assert (status == 0);
	}
	// Reopen: the constructor runs the upgrade.
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, path);
	nano::stat stats;
	nano::ledger ledger (store, stats);
	auto transaction (store.tx_begin (true));
	ASSERT_TRUE (!init);
	ASSERT_LT (2, store.version_get (transaction));
	ASSERT_EQ (nano::genesis_amount, ledger.weight (transaction, key1.pub));
	ASSERT_EQ (0, ledger.weight (transaction, key2.pub));
	nano::account_info info;
	ASSERT_FALSE (store.account_get (transaction, nano::test_genesis_key.pub, info));
	ASSERT_EQ (change_hash, info.rep_block);
}
// Database upgrade v3 -> v4: plants a v3-format pending record (keyed by
// destination only), reopens, and verifies it was migrated to the
// (destination, hash) pending_key format with source/amount/epoch intact.
TEST (block_store, upgrade_v3_v4)
{
	nano::keypair key1;
	nano::keypair key2;
	nano::keypair key3;
	auto path (nano::unique_path ());
	{
		nano::logging logging;
		bool init (false);
		nano::mdb_store store (init, logging, path);
		ASSERT_FALSE (init);
		store.stop ();
		auto transaction (store.tx_begin (true));
		store.version_put (transaction, 3);
		nano::pending_info_v3 info (key1.pub, 100, key2.pub);
		auto status (mdb_put (store.env.tx (transaction), store.pending_v0, nano::mdb_val (key3.pub), info.val (), 0));
		ASSERT_EQ (0, status);
	}
	// Reopen: the constructor runs the upgrade.
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, path);
	nano::stat stats;
	nano::ledger ledger (store, stats);
	auto transaction (store.tx_begin (true));
	ASSERT_FALSE (init);
	ASSERT_LT (3, store.version_get (transaction));
	nano::pending_key key (key2.pub, key3.pub);
	nano::pending_info info;
	auto error (store.pending_get (transaction, key, info));
	ASSERT_FALSE (error);
	ASSERT_EQ (key1.pub, info.source);
	ASSERT_EQ (nano::amount (100), info.amount);
	ASSERT_EQ (nano::epoch::epoch_0, info.epoch);
}
// Database upgrade v4 -> v5: clears a block's successor in a v4 store and
// verifies the upgrade rebuilds successor links on reopen.
TEST (block_store, upgrade_v4_v5)
{
	nano::block_hash genesis_hash (0);
	nano::block_hash hash (0);
	auto path (nano::unique_path ());
	{
		nano::logging logging;
		bool init (false);
		nano::mdb_store store (init, logging, path);
		ASSERT_FALSE (init);
		store.stop ();
		auto transaction (store.tx_begin (true));
		nano::genesis genesis;
		nano::stat stats;
		nano::ledger ledger (store, stats);
		store.initialize (transaction, genesis);
		store.version_put (transaction, 4);
		nano::account_info info;
		store.account_get (transaction, nano::test_genesis_key.pub, info);
		nano::keypair key0;
		nano::send_block block0 (info.head, key0.pub, nano::genesis_amount - nano::Gxrb_ratio, nano::test_genesis_key.prv, nano::test_genesis_key.pub, 0);
		ASSERT_EQ (nano::process_result::progress, ledger.process (transaction, block0).code);
		hash = block0.hash ();
		auto original (store.block_get (transaction, info.head));
		genesis_hash = info.head;
		// Simulate the v4 state: no successor recorded for genesis.
		store.block_successor_clear (transaction, info.head);
		ASSERT_TRUE (store.block_successor (transaction, genesis_hash).is_zero ());
		nano::account_info info2;
		store.account_get (transaction, nano::test_genesis_key.pub, info2);
		nano::account_info_v5 info_old (info2.head, info2.rep_block, info2.open_block, info2.balance, info2.modified);
		auto status (mdb_put (store.env.tx (transaction), store.accounts_v0, nano::mdb_val (nano::test_genesis_key.pub), info_old.val (), 0));
		assert (status == 0);
	}
	// Reopen: the upgrade must restore genesis' successor to the send block.
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, path);
	ASSERT_FALSE (init);
	auto transaction (store.tx_begin ());
	ASSERT_EQ (hash, store.block_successor (transaction, genesis_hash));
}
// block_random on a store containing only genesis must return genesis.
TEST (block_store, block_random)
{
	nano::logging logging;
	bool init (false);
	nano::mdb_store store (init, logging, nano::unique_path ());
	ASSERT_TRUE (!init);
	nano::genesis genesis;
	auto transaction (store.tx_begin (true));
	store.initialize (transaction, genesis);
	auto block (store.block_random (transaction));
	ASSERT_NE (nullptr, block);
	ASSERT_EQ (*block, *genesis.open);
}
// Verify the v5 -> v6 upgrade computes per-account block counts: a v5-format
// account record has no block_count field, so after upgrade the genesis
// account must report a count of 1 (the open block).
TEST (block_store, upgrade_v5_v6)
{
auto path (nano::unique_path ());
{
nano::logging logging;
bool init (false);
nano::mdb_store store (init, logging, path);
ASSERT_FALSE (init);
store.stop ();
auto transaction (store.tx_begin (true));
nano::genesis genesis;
store.initialize (transaction, genesis);
store.version_put (transaction, 5);
nano::account_info info;
store.account_get (transaction, nano::test_genesis_key.pub, info);
// Overwrite the account entry with the legacy v5 layout via raw LMDB.
nano::account_info_v5 info_old (info.head, info.rep_block, info.open_block, info.balance, info.modified);
auto status (mdb_put (store.env.tx (transaction), store.accounts_v0, nano::mdb_val (nano::test_genesis_key.pub), info_old.val (), 0));
assert (status == 0);
}
// Reopen to trigger the upgrade, then check the computed block count.
nano::logging logging;
bool init (false);
nano::mdb_store store (init, logging, path);
ASSERT_FALSE (init);
auto transaction (store.tx_begin ());
nano::account_info info;
store.account_get (transaction, nano::test_genesis_key.pub, info);
ASSERT_EQ (1, info.block_count);
}
// Verify the v6 -> v7 upgrade drops stale unchecked entries: an unchecked
// block written at version 6 must be gone after the store is reopened.
TEST (block_store, upgrade_v6_v7)
{
auto path (nano::unique_path ());
{
nano::logging logging;
bool init (false);
nano::mdb_store store (init, logging, path);
ASSERT_FALSE (init);
store.stop ();
auto transaction (store.tx_begin (true));
nano::genesis genesis;
store.initialize (transaction, genesis);
store.version_put (transaction, 6);
auto send1 (std::make_shared<nano::send_block> (0, 0, 0, nano::test_genesis_key.prv, nano::test_genesis_key.pub, 0));
store.unchecked_put (transaction, send1->hash (), send1);
// flush persists the in-memory unchecked cache to the database.
store.flush (transaction);
ASSERT_NE (store.unchecked_end (), store.unchecked_begin (transaction));
}
// Reopen to trigger the upgrade; the unchecked table must now be empty.
nano::logging logging;
bool init (false);
nano::mdb_store store (init, logging, path);
ASSERT_FALSE (init);
auto transaction (store.tx_begin ());
ASSERT_EQ (store.unchecked_end (), store.unchecked_begin (transaction));
}
// Databases need to be dropped in order to convert to dupsort compatible
// Exercises converting the unchecked table between plain and MDB_DUPSORT
// key layouts: without a drop, duplicate keys overwrite each other; after a
// full drop + dupsort reopen, both values under one key are retained.
TEST (block_store, DISABLED_change_dupsort) // Unchecked is no longer dupsort table
{
auto path (nano::unique_path ());
nano::logging logging;
bool init (false);
nano::mdb_store store (init, logging, path);
auto transaction (store.tx_begin (true));
ASSERT_EQ (0, mdb_drop (store.env.tx (transaction), store.unchecked, 1));
ASSERT_EQ (0, mdb_dbi_open (store.env.tx (transaction), "unchecked", MDB_CREATE, &store.unchecked));
auto send1 (std::make_shared<nano::send_block> (0, 0, 0, nano::test_genesis_key.prv, nano::test_genesis_key.pub, 0));
auto send2 (std::make_shared<nano::send_block> (1, 0, 0, nano::test_genesis_key.prv, nano::test_genesis_key.pub, 0));
ASSERT_NE (send1->hash (), send2->hash ());
store.unchecked_put (transaction, send1->hash (), send1);
store.unchecked_put (transaction, send1->hash (), send2);
store.flush (transaction);
{
// Plain table: second put under the same key replaced the first.
auto iterator1 (store.unchecked_begin (transaction));
++iterator1;
ASSERT_EQ (store.unchecked_end (), iterator1);
}
// Reopen the handle as dupsort WITHOUT dropping the data (del = 0).
ASSERT_EQ (0, mdb_drop (store.env.tx (transaction), store.unchecked, 0));
mdb_dbi_close (store.env, store.unchecked);
ASSERT_EQ (0, mdb_dbi_open (store.env.tx (transaction), "unchecked", MDB_CREATE | MDB_DUPSORT, &store.unchecked));
store.unchecked_put (transaction, send1->hash (), send1);
store.unchecked_put (transaction, send1->hash (), send2);
store.flush (transaction);
{
// Still behaves as a plain table: conversion requires a real drop.
auto iterator1 (store.unchecked_begin (transaction));
++iterator1;
ASSERT_EQ (store.unchecked_end (), iterator1);
}
// Fully drop (del = 1) and recreate as dupsort; duplicates now coexist.
ASSERT_EQ (0, mdb_drop (store.env.tx (transaction), store.unchecked, 1));
ASSERT_EQ (0, mdb_dbi_open (store.env.tx (transaction), "unchecked", MDB_CREATE | MDB_DUPSORT, &store.unchecked));
store.unchecked_put (transaction, send1->hash (), send1);
store.unchecked_put (transaction, send1->hash (), send2);
store.flush (transaction);
{
auto iterator1 (store.unchecked_begin (transaction));
++iterator1;
ASSERT_NE (store.unchecked_end (), iterator1);
++iterator1;
ASSERT_EQ (store.unchecked_end (), iterator1);
}
}
// Verify the v7 -> v8 upgrade converts the unchecked table to dupsort:
// after upgrade, two different blocks stored under the same key must both
// be kept instead of the second overwriting the first.
TEST (block_store, upgrade_v7_v8)
{
auto path (nano::unique_path ());
{
nano::logging logging;
bool init (false);
nano::mdb_store store (init, logging, path);
store.stop ();
auto transaction (store.tx_begin (true));
// Recreate the unchecked table in the legacy (non-dupsort) layout.
ASSERT_EQ (0, mdb_drop (store.env.tx (transaction), store.unchecked, 1));
ASSERT_EQ (0, mdb_dbi_open (store.env.tx (transaction), "unchecked", MDB_CREATE, &store.unchecked));
store.version_put (transaction, 7);
}
// Reopen to trigger the upgrade, then store duplicates under one key.
nano::logging logging;
bool init (false);
nano::mdb_store store (init, logging, path);
ASSERT_FALSE (init);
auto transaction (store.tx_begin (true));
auto send1 (std::make_shared<nano::send_block> (0, 0, 0, nano::test_genesis_key.prv, nano::test_genesis_key.pub, 0));
auto send2 (std::make_shared<nano::send_block> (1, 0, 0, nano::test_genesis_key.prv, nano::test_genesis_key.pub, 0));
store.unchecked_put (transaction, send1->hash (), send1);
store.unchecked_put (transaction, send1->hash (), send2);
store.flush (transaction);
{
// Exactly two entries should be iterable.
auto iterator1 (store.unchecked_begin (transaction));
++iterator1;
ASSERT_NE (store.unchecked_end (), iterator1);
++iterator1;
ASSERT_EQ (store.unchecked_end (), iterator1);
}
}
// Votes generated via vote_generate are cached in memory first; they must
// only become visible to vote_get after an explicit flush.
TEST (block_store, sequence_flush)
{
auto path (nano::unique_path ());
nano::logging logging;
bool init (false);
nano::mdb_store store (init, logging, path);
ASSERT_FALSE (init);
auto transaction (store.tx_begin (true));
nano::keypair key1;
auto send1 (std::make_shared<nano::send_block> (0, 0, 0, nano::test_genesis_key.prv, nano::test_genesis_key.pub, 0));
auto vote1 (store.vote_generate (transaction, key1.pub, key1.prv, send1));
// Not yet flushed: lookup misses.
auto seq2 (store.vote_get (transaction, vote1->account));
ASSERT_EQ (nullptr, seq2);
store.flush (transaction);
// After flush the persisted vote matches the generated one.
auto seq3 (store.vote_get (transaction, vote1->account));
ASSERT_EQ (*seq3, *vote1);
}
// Same flush-visibility contract as sequence_flush, but for votes generated
// from a list of block hashes rather than a single block.
TEST (block_store, sequence_flush_by_hash)
{
auto path (nano::unique_path ());
nano::logging logging;
bool init (false);
nano::mdb_store store (init, logging, path);
ASSERT_FALSE (init);
auto transaction (store.tx_begin_write ());
nano::keypair key1;
std::vector<nano::block_hash> blocks1;
blocks1.push_back (nano::genesis ().hash ());
blocks1.push_back (1234);
blocks1.push_back (5678);
auto vote1 (store.vote_generate (transaction, key1.pub, key1.prv, blocks1));
// Not yet flushed: lookup misses.
auto seq2 (store.vote_get (transaction, vote1->account));
ASSERT_EQ (nullptr, seq2);
store.flush (transaction);
auto seq3 (store.vote_get (transaction, vote1->account));
ASSERT_EQ (*seq3, *vote1);
}
// Upgrading tracking block sequence numbers to whole vote.
// A v8 store kept only a raw uint64 sequence per account (in a "sequence"
// table); the v9 upgrade must turn that into a full vote record carrying
// the same sequence number.
TEST (block_store, upgrade_v8_v9)
{
auto path (nano::unique_path ());
nano::keypair key;
{
nano::logging logging;
bool init (false);
nano::mdb_store store (init, logging, path);
store.stop ();
auto transaction (store.tx_begin (true));
// Recreate the legacy "sequence" table and seed one raw entry.
ASSERT_EQ (0, mdb_drop (store.env.tx (transaction), store.vote, 1));
ASSERT_EQ (0, mdb_dbi_open (store.env.tx (transaction), "sequence", MDB_CREATE, &store.vote));
uint64_t sequence (10);
ASSERT_EQ (0, mdb_put (store.env.tx (transaction), store.vote, nano::mdb_val (key.pub), nano::mdb_val (sizeof (sequence), &sequence), 0));
store.version_put (transaction, 8);
}
// Reopen to trigger the upgrade, then verify the migrated vote.
nano::logging logging;
bool init (false);
nano::mdb_store store (init, logging, path);
ASSERT_FALSE (init);
auto transaction (store.tx_begin ());
ASSERT_LT (8, store.version_get (transaction));
auto vote (store.vote_get (transaction, key.pub));
ASSERT_NE (nullptr, vote);
ASSERT_EQ (10, vote->sequence);
}
// Round-trip a state block through the store: put/exists/get/count/del,
// checking that it lands in (and leaves) the v0 state-block bucket.
TEST (block_store, state_block)
{
nano::logging logging;
bool error (false);
nano::mdb_store store (error, logging, nano::unique_path ());
ASSERT_FALSE (error);
nano::genesis genesis;
auto transaction (store.tx_begin (true));
store.initialize (transaction, genesis);
nano::keypair key1;
nano::state_block block1 (1, genesis.hash (), 3, 4, 6, key1.prv, key1.pub, 7);
ASSERT_EQ (nano::block_type::state, block1.type ());
nano::block_sideband sideband1 (nano::block_type::state, 0, 0, 0, 0, 0);
store.block_put (transaction, block1.hash (), block1, sideband1);
ASSERT_TRUE (store.block_exists (transaction, block1.hash ()));
auto block2 (store.block_get (transaction, block1.hash ()));
ASSERT_NE (nullptr, block2);
ASSERT_EQ (block1, *block2);
// Stored as epoch-0 state: counted in state_v0, not state_v1.
auto count (store.block_count (transaction));
ASSERT_EQ (1, count.state_v0);
ASSERT_EQ (0, count.state_v1);
store.block_del (transaction, block1.hash ());
ASSERT_FALSE (store.block_exists (transaction, block1.hash ()));
auto count2 (store.block_count (transaction));
ASSERT_EQ (0, count2.state_v0);
ASSERT_EQ (0, count2.state_v1);
}
namespace
{
// Test helper: overwrite block_a's store entry with the legacy (pre-v12)
// serialization — block bytes followed only by the successor hash, with no
// sideband. Then confirm a re-read reports sideband height 0, i.e. the
// store recognizes the entry as lacking sideband data. db_a selects the
// concrete per-block-type table to write into.
void write_legacy_sideband (nano::mdb_store & store_a, nano::transaction & transaction_a, nano::block & block_a, nano::block_hash const & successor_a, MDB_dbi db_a)
{
std::vector<uint8_t> vector;
{
nano::vectorstream stream (vector);
block_a.serialize (stream);
nano::write (stream, successor_a);
}
MDB_val val{ vector.size (), vector.data () };
auto hash (block_a.hash ());
auto status2 (mdb_put (store_a.env.tx (transaction_a), db_a, nano::mdb_val (hash), &val, 0));
ASSERT_EQ (0, status2);
nano::block_sideband sideband;
auto block2 (store_a.block_get (transaction_a, block_a.hash (), &sideband));
ASSERT_NE (nullptr, block2);
// Height 0 is the sentinel for "no sideband present".
ASSERT_EQ (0, sideband.height);
};
}
// Verify the background sideband upgrade rebuilds sideband data for the
// genesis block: write it in legacy (sideband-less) format at version 11,
// reopen the store, poll until the upgrade thread reports completion, and
// check the genesis sideband height is restored to 1.
TEST (block_store, upgrade_sideband_genesis)
{
bool error (false);
nano::genesis genesis;
auto path (nano::unique_path ());
{
nano::logging logging;
nano::mdb_store store (error, logging, path);
ASSERT_FALSE (error);
store.stop ();
auto transaction (store.tx_begin (true));
store.version_put (transaction, 11);
store.initialize (transaction, genesis);
nano::block_sideband sideband;
auto genesis_block (store.block_get (transaction, genesis.hash (), &sideband));
ASSERT_NE (nullptr, genesis_block);
ASSERT_EQ (1, sideband.height);
write_legacy_sideband (store, transaction, *genesis_block, 0, store.open_blocks);
auto genesis_block2 (store.block_get (transaction, genesis.hash (), &sideband));
// Fixed copy-paste bug: this previously re-checked genesis_block, so the
// second fetch was never actually null-checked.
ASSERT_NE (nullptr, genesis_block2);
ASSERT_EQ (0, sideband.height);
}
nano::logging logging;
nano::mdb_store store (error, logging, path);
ASSERT_FALSE (error);
// The upgrade runs on a background thread; poll (max ~2s) for completion.
auto done (false);
auto iterations (0);
while (!done)
{
std::this_thread::sleep_for (std::chrono::milliseconds (10));
auto transaction (store.tx_begin (false));
done = store.full_sideband (transaction);
ASSERT_LT (iterations, 200);
++iterations;
}
auto transaction (store.tx_begin_read ());
nano::block_sideband sideband;
auto genesis_block (store.block_get (transaction, genesis.hash (), &sideband));
ASSERT_NE (nullptr, genesis_block);
ASSERT_EQ (1, sideband.height);
}
// Sideband upgrade over a two-block chain on one account: genesis open
// (height 1) followed by a state send (height 2), both written legacy.
TEST (block_store, upgrade_sideband_two_blocks)
{
bool error (false);
nano::genesis genesis;
nano::block_hash hash2;
auto path (nano::unique_path ());
{
nano::logging logging;
nano::mdb_store store (error, logging, path);
ASSERT_FALSE (error);
store.stop ();
nano::stat stat;
nano::ledger ledger (store, stat);
auto transaction (store.tx_begin (true));
store.version_put (transaction, 11);
store.initialize (transaction, genesis);
nano::state_block block (nano::test_genesis_key.pub, genesis.hash (), nano::test_genesis_key.pub, nano::genesis_amount - nano::Gxrb_ratio, nano::test_genesis_key.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, 0);
hash2 = block.hash ();
ASSERT_EQ (nano::process_result::progress, ledger.process (transaction, block).code);
// Strip sideband from both blocks to simulate a pre-v12 database.
write_legacy_sideband (store, transaction, *genesis.open, hash2, store.open_blocks);
write_legacy_sideband (store, transaction, block, 0, store.state_blocks_v0);
}
nano::logging logging;
nano::mdb_store store (error, logging, path);
ASSERT_FALSE (error);
// The upgrade runs on a background thread; poll (max ~2s) for completion.
auto done (false);
auto iterations (0);
while (!done)
{
std::this_thread::sleep_for (std::chrono::milliseconds (10));
auto transaction (store.tx_begin (false));
done = store.full_sideband (transaction);
ASSERT_LT (iterations, 200);
++iterations;
}
auto transaction (store.tx_begin_read ());
nano::block_sideband sideband;
auto genesis_block (store.block_get (transaction, genesis.hash (), &sideband));
ASSERT_NE (nullptr, genesis_block);
ASSERT_EQ (1, sideband.height);
nano::block_sideband sideband2;
auto block2 (store.block_get (transaction, hash2, &sideband2));
ASSERT_NE (nullptr, block2);
ASSERT_EQ (2, sideband2.height);
}
// Sideband upgrade across two accounts: genesis chain (heights 1 and 2) plus
// a second account opened by a state open block (height 1), all legacy.
TEST (block_store, upgrade_sideband_two_accounts)
{
bool error (false);
nano::genesis genesis;
nano::block_hash hash2;
nano::block_hash hash3;
nano::keypair key;
auto path (nano::unique_path ());
{
nano::logging logging;
nano::mdb_store store (error, logging, path);
ASSERT_FALSE (error);
store.stop ();
nano::stat stat;
nano::ledger ledger (store, stat);
auto transaction (store.tx_begin (true));
store.version_put (transaction, 11);
store.initialize (transaction, genesis);
// Genesis sends to key ...
nano::state_block block1 (nano::test_genesis_key.pub, genesis.hash (), nano::test_genesis_key.pub, nano::genesis_amount - nano::Gxrb_ratio, key.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, 0);
hash2 = block1.hash ();
ASSERT_EQ (nano::process_result::progress, ledger.process (transaction, block1).code);
// ... and key opens its account by receiving that send.
nano::state_block block2 (key.pub, 0, nano::test_genesis_key.pub, nano::Gxrb_ratio, hash2, key.prv, key.pub, 0);
hash3 = block2.hash ();
ASSERT_EQ (nano::process_result::progress, ledger.process (transaction, block2).code);
write_legacy_sideband (store, transaction, *genesis.open, hash2, store.open_blocks);
write_legacy_sideband (store, transaction, block1, 0, store.state_blocks_v0);
write_legacy_sideband (store, transaction, block2, 0, store.state_blocks_v0);
}
nano::logging logging;
nano::mdb_store store (error, logging, path);
ASSERT_FALSE (error);
// The upgrade runs on a background thread; poll (max ~2s) for completion.
auto done (false);
auto iterations (0);
while (!done)
{
std::this_thread::sleep_for (std::chrono::milliseconds (10));
auto transaction (store.tx_begin (false));
done = store.full_sideband (transaction);
ASSERT_LT (iterations, 200);
++iterations;
}
auto transaction (store.tx_begin_read ());
nano::block_sideband sideband;
auto genesis_block (store.block_get (transaction, genesis.hash (), &sideband));
ASSERT_NE (nullptr, genesis_block);
ASSERT_EQ (1, sideband.height);
nano::block_sideband sideband2;
auto block2 (store.block_get (transaction, hash2, &sideband2));
ASSERT_NE (nullptr, block2);
ASSERT_EQ (2, sideband2.height);
nano::block_sideband sideband3;
auto block3 (store.block_get (transaction, hash3, &sideband3));
ASSERT_NE (nullptr, block3);
// First block of the second account's chain.
ASSERT_EQ (1, sideband3.height);
}
// A new block must still process correctly on top of a predecessor whose
// store entry is in the legacy sideband-less format.
TEST (block_store, insert_after_legacy)
{
nano::logging logging;
bool error (false);
nano::genesis genesis;
nano::mdb_store store (error, logging, nano::unique_path ());
ASSERT_FALSE (error);
store.stop ();
nano::stat stat;
nano::ledger ledger (store, stat);
auto transaction (store.tx_begin (true));
store.version_put (transaction, 11);
store.initialize (transaction, genesis);
write_legacy_sideband (store, transaction, *genesis.open, 0, store.open_blocks);
nano::state_block block (nano::test_genesis_key.pub, genesis.hash (), nano::test_genesis_key.pub, nano::genesis_amount - nano::Gxrb_ratio, nano::test_genesis_key.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, 0);
ASSERT_EQ (nano::process_result::progress, ledger.process (transaction, block).code);
}
// Rolling back a block must work even when the whole chain is stored in the
// legacy sideband-less format.
TEST (block_store, upgrade_sideband_rollback_old)
{
nano::logging logging;
bool error (false);
nano::genesis genesis;
nano::mdb_store store (error, logging, nano::unique_path ());
ASSERT_FALSE (error);
store.stop ();
nano::stat stat;
nano::ledger ledger (store, stat);
auto transaction (store.tx_begin (true));
store.version_put (transaction, 11);
store.initialize (transaction, genesis);
nano::send_block block1 (genesis.hash (), nano::test_genesis_key.pub, nano::genesis_amount - nano::Gxrb_ratio, nano::test_genesis_key.prv, nano::test_genesis_key.pub, 0);
ASSERT_EQ (nano::process_result::progress, ledger.process (transaction, block1).code);
nano::send_block block2 (block1.hash (), nano::test_genesis_key.pub, nano::genesis_amount - 2 * nano::Gxrb_ratio, nano::test_genesis_key.prv, nano::test_genesis_key.pub, 0);
ASSERT_EQ (nano::process_result::progress, ledger.process (transaction, block2).code);
// Rewrite all three blocks without sideband, then roll back the tip.
write_legacy_sideband (store, transaction, *genesis.open, block1.hash (), store.open_blocks);
write_legacy_sideband (store, transaction, block1, block2.hash (), store.send_blocks);
write_legacy_sideband (store, transaction, block2, 0, store.send_blocks);
ASSERT_TRUE (store.block_exists (transaction, block2.hash ()));
ledger.rollback (transaction, block2.hash ());
ASSERT_FALSE (store.block_exists (transaction, block2.hash ()));
}
// Account for an open block should be retrievable
// ledger.account must be able to compute the owning account of a block even
// when its store entry lacks sideband data (legacy format).
TEST (block_store, legacy_account_computed)
{
nano::logging logging;
bool init (false);
nano::mdb_store store (init, logging, nano::unique_path ());
ASSERT_TRUE (!init);
store.stop ();
nano::stat stats;
nano::ledger ledger (store, stats);
nano::genesis genesis;
auto transaction (store.tx_begin (true));
store.initialize (transaction, genesis);
store.version_put (transaction, 11);
write_legacy_sideband (store, transaction, *genesis.open, 0, store.open_blocks);
ASSERT_EQ (nano::genesis_account, ledger.account (transaction, genesis.hash ()));
}
// Sideband upgrade must preserve the epoch (block version) of epoch-1 state
// blocks, and the ledger must keep accepting epoch-1 blocks afterwards.
// The epoch link is 42 and the epoch signer is the genesis key here.
TEST (block_store, upgrade_sideband_epoch)
{
bool error (false);
nano::genesis genesis;
nano::block_hash hash2;
auto path (nano::unique_path ());
{
nano::logging logging;
nano::mdb_store store (error, logging, path);
ASSERT_FALSE (error);
store.stop ();
nano::stat stat;
nano::ledger ledger (store, stat, 42, nano::test_genesis_key.pub);
auto transaction (store.tx_begin (true));
store.version_put (transaction, 11);
store.initialize (transaction, genesis);
// An epoch block: same balance, link set to the epoch link (42).
nano::state_block block1 (nano::test_genesis_key.pub, genesis.hash (), nano::test_genesis_key.pub, nano::genesis_amount, 42, nano::test_genesis_key.prv, nano::test_genesis_key.pub, 0);
hash2 = block1.hash ();
ASSERT_EQ (nano::process_result::progress, ledger.process (transaction, block1).code);
ASSERT_EQ (nano::epoch::epoch_1, store.block_version (transaction, hash2));
write_legacy_sideband (store, transaction, *genesis.open, hash2, store.open_blocks);
write_legacy_sideband (store, transaction, block1, 0, store.state_blocks_v1);
}
nano::logging logging;
nano::mdb_store store (error, logging, path);
nano::stat stat;
nano::ledger ledger (store, stat, 42, nano::test_genesis_key.pub);
ASSERT_FALSE (error);
// The upgrade runs on a background thread; poll (max ~2s) for completion.
auto done (false);
auto iterations (0);
while (!done)
{
std::this_thread::sleep_for (std::chrono::milliseconds (10));
auto transaction (store.tx_begin (false));
done = store.full_sideband (transaction);
ASSERT_LT (iterations, 200);
++iterations;
}
auto transaction (store.tx_begin_write ());
// Epoch survived the upgrade and sideband height was rebuilt.
ASSERT_EQ (nano::epoch::epoch_1, store.block_version (transaction, hash2));
nano::block_sideband sideband;
auto block1 (store.block_get (transaction, hash2, &sideband));
ASSERT_NE (0, sideband.height);
// A successor on the upgraded epoch-1 account must still process.
nano::state_block block2 (nano::test_genesis_key.pub, hash2, nano::test_genesis_key.pub, nano::genesis_amount - nano::Gxrb_ratio, nano::test_genesis_key.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, 0);
ASSERT_EQ (nano::process_result::progress, ledger.process (transaction, block2).code);
ASSERT_EQ (nano::epoch::epoch_1, store.block_version (transaction, block2.hash ()));
}
// Sideband height must be tracked correctly across every block type:
// legacy send/receive/change, state blocks, epoch blocks and opens, over
// four accounts (genesis, key1, key2, key3).
TEST (block_store, sideband_height)
{
nano::logging logging;
bool error (false);
nano::genesis genesis;
nano::keypair epoch_key;
nano::keypair key1;
nano::keypair key2;
nano::keypair key3;
nano::mdb_store store (error, logging, nano::unique_path ());
ASSERT_FALSE (error);
store.stop ();
nano::stat stat;
nano::ledger ledger (store, stat);
ledger.epoch_signer = epoch_key.pub;
auto transaction (store.tx_begin (true));
store.initialize (transaction, genesis);
// Genesis account chain, heights 2..7: legacy blocks then state sends.
nano::send_block send (genesis.hash (), nano::test_genesis_key.pub, nano::genesis_amount - nano::Gxrb_ratio, nano::test_genesis_key.prv, nano::test_genesis_key.pub, 0);
ASSERT_EQ (nano::process_result::progress, ledger.process (transaction, send).code);
nano::receive_block receive (send.hash (), send.hash (), nano::test_genesis_key.prv, nano::test_genesis_key.pub, 0);
ASSERT_EQ (nano::process_result::progress, ledger.process (transaction, receive).code);
nano::change_block change (receive.hash (), 0, nano::test_genesis_key.prv, nano::test_genesis_key.pub, 0);
ASSERT_EQ (nano::process_result::progress, ledger.process (transaction, change).code);
nano::state_block state_send1 (nano::test_genesis_key.pub, change.hash (), 0, nano::genesis_amount - nano::Gxrb_ratio, key1.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, 0);
ASSERT_EQ (nano::process_result::progress, ledger.process (transaction, state_send1).code);
nano::state_block state_send2 (nano::test_genesis_key.pub, state_send1.hash (), 0, nano::genesis_amount - 2 * nano::Gxrb_ratio, key2.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, 0);
ASSERT_EQ (nano::process_result::progress, ledger.process (transaction, state_send2).code);
nano::state_block state_send3 (nano::test_genesis_key.pub, state_send2.hash (), 0, nano::genesis_amount - 3 * nano::Gxrb_ratio, key3.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, 0);
ASSERT_EQ (nano::process_result::progress, ledger.process (transaction, state_send3).code);
// key1: state open (height 1) then an epoch block (height 2).
nano::state_block state_open (key1.pub, 0, 0, nano::Gxrb_ratio, state_send1.hash (), key1.prv, key1.pub, 0);
ASSERT_EQ (nano::process_result::progress, ledger.process (transaction, state_open).code);
nano::state_block epoch (key1.pub, state_open.hash (), 0, nano::Gxrb_ratio, ledger.epoch_link, epoch_key.prv, epoch_key.pub, 0);
ASSERT_EQ (nano::process_result::progress, ledger.process (transaction, epoch).code);
ASSERT_EQ (nano::epoch::epoch_1, store.block_version (transaction, epoch.hash ()));
// key2: epoch open (height 1) then a state receive (height 2).
nano::state_block epoch_open (key2.pub, 0, 0, 0, ledger.epoch_link, epoch_key.prv, epoch_key.pub, 0);
ASSERT_EQ (nano::process_result::progress, ledger.process (transaction, epoch_open).code);
ASSERT_EQ (nano::epoch::epoch_1, store.block_version (transaction, epoch_open.hash ()));
nano::state_block state_receive (key2.pub, epoch_open.hash (), 0, nano::Gxrb_ratio, state_send2.hash (), key2.prv, key2.pub, 0);
ASSERT_EQ (nano::process_result::progress, ledger.process (transaction, state_receive).code);
// key3: legacy open block (height 1).
nano::open_block open (state_send3.hash (), nano::test_genesis_key.pub, key3.pub, key3.prv, key3.pub, 0);
ASSERT_EQ (nano::process_result::progress, ledger.process (transaction, open).code);
// Verify every block's stored sideband height.
nano::block_sideband sideband1;
auto block1 (store.block_get (transaction, genesis.hash (), &sideband1));
ASSERT_EQ (sideband1.height, 1);
nano::block_sideband sideband2;
auto block2 (store.block_get (transaction, send.hash (), &sideband2));
ASSERT_EQ (sideband2.height, 2);
nano::block_sideband sideband3;
auto block3 (store.block_get (transaction, receive.hash (), &sideband3));
ASSERT_EQ (sideband3.height, 3);
nano::block_sideband sideband4;
auto block4 (store.block_get (transaction, change.hash (), &sideband4));
ASSERT_EQ (sideband4.height, 4);
nano::block_sideband sideband5;
auto block5 (store.block_get (transaction, state_send1.hash (), &sideband5));
ASSERT_EQ (sideband5.height, 5);
nano::block_sideband sideband6;
auto block6 (store.block_get (transaction, state_send2.hash (), &sideband6));
ASSERT_EQ (sideband6.height, 6);
nano::block_sideband sideband7;
auto block7 (store.block_get (transaction, state_send3.hash (), &sideband7));
ASSERT_EQ (sideband7.height, 7);
nano::block_sideband sideband8;
auto block8 (store.block_get (transaction, state_open.hash (), &sideband8));
ASSERT_EQ (sideband8.height, 1);
nano::block_sideband sideband9;
auto block9 (store.block_get (transaction, epoch.hash (), &sideband9));
ASSERT_EQ (sideband9.height, 2);
nano::block_sideband sideband10;
auto block10 (store.block_get (transaction, epoch_open.hash (), &sideband10));
ASSERT_EQ (sideband10.height, 1);
nano::block_sideband sideband11;
auto block11 (store.block_get (transaction, state_receive.hash (), &sideband11));
ASSERT_EQ (sideband11.height, 2);
nano::block_sideband sideband12;
auto block12 (store.block_get (transaction, open.hash (), &sideband12));
ASSERT_EQ (sideband12.height, 1);
}
// CRUD coverage for the peer table: exists/count through put and del.
TEST (block_store, peers)
{
nano::logging logging;
auto init (false);
nano::mdb_store store (init, logging, nano::unique_path ());
ASSERT_TRUE (!init);
auto transaction (store.tx_begin_write ());
nano::endpoint_key first_peer (boost::asio::ip::address_v6::any ().to_bytes (), 100);
// The peer table starts out empty
ASSERT_FALSE (store.peer_exists (transaction, first_peer));
ASSERT_EQ (store.peer_count (transaction), 0);
// Insert one entry and make sure it can be looked up
store.peer_put (transaction, first_peer);
ASSERT_TRUE (store.peer_exists (transaction, first_peer));
ASSERT_EQ (store.peer_count (transaction), 1);
// A second entry must coexist with the first
nano::endpoint_key second_peer (boost::asio::ip::address_v6::any ().to_bytes (), 101);
store.peer_put (transaction, second_peer);
ASSERT_TRUE (store.peer_exists (transaction, second_peer));
ASSERT_TRUE (store.peer_exists (transaction, first_peer));
ASSERT_EQ (store.peer_count (transaction), 2);
// Removing the second entry leaves the first intact
store.peer_del (transaction, second_peer);
ASSERT_FALSE (store.peer_exists (transaction, second_peer));
ASSERT_TRUE (store.peer_exists (transaction, first_peer));
ASSERT_EQ (store.peer_count (transaction), 1);
// Removing the remaining entry empties the table again
store.peer_del (transaction, first_peer);
ASSERT_EQ (store.peer_count (transaction), 0);
ASSERT_FALSE (store.peer_exists (transaction, first_peer));
}
// endpoint_key must serialize as 18 bytes in network byte order (16-byte
// IPv6 address + 2-byte port) and round-trip through deserialization.
TEST (block_store, endpoint_key_byte_order)
{
boost::asio::ip::address_v6 address (boost::asio::ip::address_v6::from_string ("::ffff:127.0.0.1"));
auto port = 100;
nano::endpoint_key endpoint_key (address.to_bytes (), port);
std::vector<uint8_t> bytes;
{
nano::vectorstream stream (bytes);
nano::write (stream, endpoint_key);
}
// This checks that the endpoint is serialized as expected, with a size
// of 18 bytes (16 for ipv6 address and 2 for port), both in network byte order.
ASSERT_EQ (bytes.size (), 18);
// IPv4-mapped prefix ::ffff: then 127.x.x.x.
ASSERT_EQ (bytes[10], 0xff);
ASSERT_EQ (bytes[11], 0xff);
ASSERT_EQ (bytes[12], 127);
// Port 100 big-endian: high byte 0, low byte 100.
ASSERT_EQ (bytes[bytes.size () - 2], 0);
ASSERT_EQ (bytes.back (), 100);
// Deserialize the same stream bytes
nano::bufferstream stream1 (bytes.data (), bytes.size ());
nano::endpoint_key endpoint_key1;
nano::read (stream1, endpoint_key1);
// This should be in network bytes order
ASSERT_EQ (address.to_bytes (), endpoint_key1.address_bytes ());
// This should be in host byte order
ASSERT_EQ (port, endpoint_key1.port ());
}
// Round-trip an online-weight sample: put one (timestamp 1, weight 2),
// iterate it back, delete it, and confirm the table is empty again.
TEST (block_store, online_weight)
{
nano::logging logging;
bool error (false);
nano::mdb_store store (error, logging, nano::unique_path ());
ASSERT_FALSE (error);
auto transaction (store.tx_begin (true));
// Empty to begin with
ASSERT_EQ (0, store.online_weight_count (transaction));
ASSERT_EQ (store.online_weight_end (), store.online_weight_begin (transaction));
// Insert a single sample and read it back through iteration
store.online_weight_put (transaction, 1, 2);
ASSERT_EQ (1, store.online_weight_count (transaction));
auto entry (store.online_weight_begin (transaction));
ASSERT_NE (store.online_weight_end (), entry);
ASSERT_EQ (1, entry->first);
ASSERT_EQ (2, entry->second.number ());
// Deleting the sample empties the table again
store.online_weight_del (transaction, 1);
ASSERT_EQ (0, store.online_weight_count (transaction));
ASSERT_EQ (store.online_weight_end (), store.online_weight_begin (transaction));
}
| 1 | 14,998 | Can we encapsulate this in a function so if we want to change it against we don't have to change all call sites? | nanocurrency-nano-node | cpp |
@@ -470,8 +470,8 @@ void nano::bulk_pull_client::received_block (boost::system::error_code const & e
connection->start_time = std::chrono::steady_clock::now ();
}
connection->attempt->total_blocks++;
- total_blocks++;
bool stop_pull (connection->attempt->process_block (block, known_account, total_blocks, block_expected));
+ total_blocks++;
if (!stop_pull && !connection->hard_stop.load ())
{
/* Process block in lazy pull if not stopped | 1 | #include <nano/crypto_lib/random_pool.hpp>
#include <nano/node/bootstrap.hpp>
#include <nano/node/common.hpp>
#include <nano/node/node.hpp>
#include <nano/node/transport/tcp.hpp>
#include <nano/node/transport/udp.hpp>
#include <boost/log/trivial.hpp>
#include <algorithm>
// Tuning constants for bootstrap connection management and progress policing.
constexpr double bootstrap_connection_scale_target_blocks = 50000.0; // target blocks per connection when scaling parallelism
constexpr double bootstrap_connection_warmup_time_sec = 5.0; // grace period before a connection's rate is judged
constexpr double bootstrap_minimum_blocks_per_sec = 10.0; // slower pull connections are candidates for dropping
constexpr double bootstrap_minimum_elapsed_seconds_blockrate = 0.02; // floor on elapsed time to avoid divide-by-near-zero rates
constexpr double bootstrap_minimum_frontier_blocks_per_sec = 1000.0; // minimum acceptable frontier streaming rate
constexpr unsigned bootstrap_frontier_retry_limit = 16;
constexpr double bootstrap_minimum_termination_time_sec = 30.0;
constexpr unsigned bootstrap_max_new_connections = 10;
constexpr unsigned bulk_push_cost_limit = 200; // budget for bulk-push targets queued per frontier pass
// Out-of-class definition for the odr-used static member.
size_t constexpr nano::frontier_req_client::size_frontier;
// One outbound bootstrap connection. Registers itself with the owning
// attempt's connection counter for its lifetime (see destructor) and
// pre-sizes the shared receive buffer.
nano::bootstrap_client::bootstrap_client (std::shared_ptr<nano::node> node_a, std::shared_ptr<nano::bootstrap_attempt> attempt_a, std::shared_ptr<nano::transport::channel_tcp> channel_a) :
node (node_a),
attempt (attempt_a),
channel (channel_a),
receive_buffer (std::make_shared<std::vector<uint8_t>> ()),
start_time (std::chrono::steady_clock::now ()),
block_count (0),
pending_stop (false),
hard_stop (false)
{
++attempt->connections;
receive_buffer->resize (256);
}
// Deregister from the attempt's live-connection count.
nano::bootstrap_client::~bootstrap_client ()
{
--attempt->connections;
}
// Blocks received per second over this connection's lifetime. The elapsed
// time is clamped to a small floor so a freshly opened connection does not
// report an absurd rate from a near-zero denominator.
double nano::bootstrap_client::block_rate () const
{
auto const seconds (std::max (elapsed_seconds (), bootstrap_minimum_elapsed_seconds_blockrate));
auto const blocks (block_count.load ());
return static_cast<double> (blocks / seconds);
}
// Wall-clock time since this client connected, as fractional seconds.
double nano::bootstrap_client::elapsed_seconds () const
{
auto const now (std::chrono::steady_clock::now ());
std::chrono::duration<double> const delta (now - start_time);
return delta.count ();
}
// Request this client to wind down. With force set, the connection may
// also be torn down mid-operation (hard stop); otherwise it finishes the
// current work before stopping.
void nano::bootstrap_client::stop (bool force)
{
pending_stop = true;
if (force)
{
hard_stop = true;
}
}
// Send a frontier request covering all accounts (zero start, maximal age
// and count) and begin receiving the frontier stream on success. The send
// is marked non-droppable since bootstrap traffic must not be shed.
void nano::frontier_req_client::run ()
{
nano::frontier_req request;
request.start.clear ();
request.age = std::numeric_limits<decltype (request.age)>::max ();
request.count = std::numeric_limits<decltype (request.count)>::max ();
// Keep *this alive for the duration of the async send.
auto this_l (shared_from_this ());
connection->channel->send (
request, [this_l](boost::system::error_code const & ec, size_t size_a) {
if (!ec)
{
this_l->receive_frontier ();
}
else
{
if (this_l->connection->node->config.logging.network_logging ())
{
this_l->connection->node->logger.try_log (boost::str (boost::format ("Error while sending bootstrap request %1%") % ec.message ()));
}
}
},
false); // is bootstrap traffic is_dropable false
}
// Convenience accessor for a shared_ptr to this client.
std::shared_ptr<nano::bootstrap_client> nano::bootstrap_client::shared ()
{
return shared_from_this ();
}
// Prime the local frontier cursor: next () loads the first local account
// (into current/frontier) so received frontiers can be compared against
// our own ledger as they stream in.
nano::frontier_req_client::frontier_req_client (std::shared_ptr<nano::bootstrap_client> connection_a) :
connection (connection_a),
current (0),
count (0),
bulk_push_cost (0)
{
auto transaction (connection->node->store.tx_begin_read ());
next (transaction);
}
// Out-of-line to anchor the vtable / keep member types complete here.
nano::frontier_req_client::~frontier_req_client ()
{
}
// Read exactly one frontier entry (account + head hash) from the socket
// and hand it to received_frontier; short reads are logged and abort the
// receive loop.
void nano::frontier_req_client::receive_frontier ()
{
// Keep *this alive across the async read.
auto this_l (shared_from_this ());
connection->channel->socket->async_read (connection->receive_buffer, nano::frontier_req_client::size_frontier, [this_l](boost::system::error_code const & ec, size_t size_a) {
// An issue with asio is that sometimes, instead of reporting a bad file descriptor during disconnect,
// we simply get a size of 0.
if (size_a == nano::frontier_req_client::size_frontier)
{
this_l->received_frontier (ec, size_a);
}
else
{
if (this_l->connection->node->config.logging.network_message_logging ())
{
this_l->connection->node->logger.try_log (boost::str (boost::format ("Invalid size: expected %1%, got %2%") % nano::frontier_req_client::size_frontier % size_a));
}
}
});
}
// Queue a bulk-push target for blocks the remote peer lacks, bounded by a
// cost budget so one frontier pass cannot enqueue unlimited push work.
// Pushing a whole account chain (zero end) is charged double the cost of a
// partial chain.
void nano::frontier_req_client::unsynced (nano::block_hash const & head, nano::block_hash const & end)
{
if (bulk_push_cost < bulk_push_cost_limit)
{
connection->attempt->add_bulk_push_target (head, end);
bulk_push_cost += end.is_zero () ? 2 : 1;
}
}
// Handle one frontier entry read off the socket: deserialize the (account, head)
// pair, abort overly slow peers, then walk the local ledger in step with the
// remote stream to decide, per account, whether to bulk-push to the peer,
// bulk-pull from it, or skip (in sync). A zero account terminates the stream and
// fulfils `promise` with false (success).
void nano::frontier_req_client::received_frontier (boost::system::error_code const & ec, size_t size_a)
{
	if (!ec)
	{
		assert (size_a == nano::frontier_req_client::size_frontier);
		nano::account account;
		nano::bufferstream account_stream (connection->receive_buffer->data (), sizeof (account));
		auto error1 (nano::try_read (account_stream, account));
		(void)error1;
		assert (!error1);
		nano::block_hash latest;
		nano::bufferstream latest_stream (connection->receive_buffer->data () + sizeof (account), sizeof (latest));
		auto error2 (nano::try_read (latest_stream, latest));
		(void)error2;
		assert (!error2);
		if (count == 0)
		{
			// Timing starts at the first received frontier, not at request time.
			start_time = std::chrono::steady_clock::now ();
		}
		++count;
		std::chrono::duration<double> time_span = std::chrono::duration_cast<std::chrono::duration<double>> (std::chrono::steady_clock::now () - start_time);
		double elapsed_sec = std::max (time_span.count (), bootstrap_minimum_elapsed_seconds_blockrate);
		double blocks_per_sec = static_cast<double> (count) / elapsed_sec;
		// Abort peers that trickle frontiers too slowly, after a warmup grace period.
		if (elapsed_sec > bootstrap_connection_warmup_time_sec && blocks_per_sec < bootstrap_minimum_frontier_blocks_per_sec)
		{
			connection->node->logger.try_log (boost::str (boost::format ("Aborting frontier req because it was too slow")));
			promise.set_value (true);
			return;
		}
		if (connection->attempt->should_log ())
		{
			connection->node->logger.always_log (boost::str (boost::format ("Received %1% frontiers from %2%") % std::to_string (count) % connection->channel->to_string ()));
		}
		auto transaction (connection->node->store.tx_begin_read ());
		if (!account.is_zero ())
		{
			// Local accounts ordered before the remote one are unknown to the peer.
			while (!current.is_zero () && current < account)
			{
				// We know about an account they don't.
				unsynced (frontier, 0);
				next (transaction);
			}
			if (!current.is_zero ())
			{
				if (account == current)
				{
					if (latest == frontier)
					{
						// In sync
					}
					else
					{
						if (connection->node->store.block_exists (transaction, latest))
						{
							// We know about a block they don't.
							unsynced (frontier, latest);
						}
						else
						{
							connection->attempt->add_pull (nano::pull_info (account, latest, frontier));
							// Either we're behind or there's a fork we differ on
							// Either way, bulk pushing will probably not be effective
							bulk_push_cost += 5;
						}
					}
					next (transaction);
				}
				else
				{
					// Peer has an account we don't know at all; pull its entire chain.
					assert (account < current);
					connection->attempt->add_pull (nano::pull_info (account, latest, nano::block_hash (0)));
				}
			}
			else
			{
				// Local iteration exhausted; every remaining remote account is new to us.
				connection->attempt->add_pull (nano::pull_info (account, latest, nano::block_hash (0)));
			}
			receive_frontier ();
		}
		else
		{
			// Zero account marks end of stream: flush remaining local-only accounts as push targets.
			while (!current.is_zero ())
			{
				// We know about an account they don't.
				unsynced (frontier, 0);
				next (transaction);
			}
			if (connection->node->config.logging.bulk_pull_logging ())
			{
				connection->node->logger.try_log ("Bulk push cost: ", bulk_push_cost);
			}
			{
				try
				{
					// Signal successful completion; may already be satisfied if stop () raced us.
					promise.set_value (false);
				}
				catch (std::future_error &)
				{
				}
				connection->attempt->pool_connection (connection);
			}
		}
	}
	else
	{
		if (connection->node->config.logging.network_logging ())
		{
			connection->node->logger.try_log (boost::str (boost::format ("Error while receiving frontier %1%") % ec.message ()));
		}
	}
}
// Advance `current`/`frontier` to the next locally-known account. Accounts are
// fetched from the store in batches of up to 128 into a deque so we don't hold a
// read transaction open per step; a sentinel (0, 0) entry marks end of iteration.
void nano::frontier_req_client::next (nano::transaction const & transaction_a)
{
	// Filling accounts deque to prevent often read transactions
	if (accounts.empty ())
	{
		size_t max_size (128);
		for (auto i (connection->node->store.latest_begin (transaction_a, current.number () + 1)), n (connection->node->store.latest_end ()); i != n && accounts.size () != max_size; ++i)
		{
			nano::account_info const & info (i->second);
			nano::account const & account (i->first);
			accounts.emplace_back (account, info.head);
		}
		/* If loop breaks before max_size, then latest_end () is reached
		Add empty record to finish frontier_req_server */
		if (accounts.size () != max_size)
		{
			accounts.emplace_back (nano::account (0), nano::block_hash (0));
		}
	}
	// Retrieving accounts from deque
	auto const & account_pair (accounts.front ());
	current = account_pair.first;
	frontier = account_pair.second;
	accounts.pop_front ();
}
// Construct a bulk pull client for one pull request and wake any threads waiting
// on the owning attempt's condition variable.
nano::bulk_pull_client::bulk_pull_client (std::shared_ptr<nano::bootstrap_client> connection_a, nano::pull_info const & pull_a) :
connection (connection_a),
known_account (0),
pull (pull_a),
total_blocks (0),
unexpected_count (0)
{
	std::lock_guard<std::mutex> mutex (connection->attempt->mutex);
	connection->attempt->condition.notify_all ();
}
// Destructor doubles as completion handling: if the pull did not reach its
// expected end block it is requeued (with progress recorded) for another peer;
// otherwise the cached pull entry is removed. Always decrements the attempt's
// in-flight pull counter and notifies waiters.
nano::bulk_pull_client::~bulk_pull_client ()
{
	// If received end block is not expected end block
	if (expected != pull.end)
	{
		// Resume from where this pull got to rather than restarting from scratch.
		pull.head = expected;
		if (connection->attempt->mode != nano::bootstrap_mode::legacy)
		{
			pull.account = expected;
		}
		pull.processed += total_blocks - unexpected_count;
		connection->attempt->requeue_pull (pull);
		if (connection->node->config.logging.bulk_pull_logging ())
		{
			connection->node->logger.try_log (boost::str (boost::format ("Bulk pull end block is not expected %1% for account %2%") % pull.end.to_string () % pull.account.to_account ()));
		}
	}
	else
	{
		connection->node->bootstrap_initiator.cache.remove (pull);
	}
	{
		std::lock_guard<std::mutex> mutex (connection->attempt->mutex);
		--connection->attempt->pulling;
	}
	condition.notify_all () must happen outside the lock scope above; see below.
	connection->attempt->condition.notify_all ();
}
// Serialize and send the bulk_pull message for this client's pull, then begin
// receiving blocks on success. Cached (partially-completed) pulls resume from the
// recorded head rather than the account frontier.
void nano::bulk_pull_client::request ()
{
	expected = pull.head;
	nano::bulk_pull req;
	req.start = (pull.head == pull.head_original) ? pull.account : pull.head; // Account for new pulls, head for cached pulls
	req.end = pull.end;
	req.count = pull.count;
	req.set_count_present (pull.count != 0);
	if (connection->node->config.logging.bulk_pull_logging ())
	{
		std::unique_lock<std::mutex> lock (connection->attempt->mutex);
		connection->node->logger.try_log (boost::str (boost::format ("Requesting account %1% from %2%. %3% accounts in queue") % pull.account.to_account () % connection->channel->to_string () % connection->attempt->pulls.size ()));
	}
	else if (connection->node->config.logging.network_logging () && connection->attempt->should_log ())
	{
		std::unique_lock<std::mutex> lock (connection->attempt->mutex);
		connection->node->logger.always_log (boost::str (boost::format ("%1% accounts in pull queue") % connection->attempt->pulls.size ()));
	}
	// Keep this client alive for the duration of the async send.
	auto this_l (shared_from_this ());
	connection->channel->send (
	req, [this_l](boost::system::error_code const & ec, size_t size_a) {
		if (!ec)
		{
			this_l->receive_block ();
		}
		else
		{
			if (this_l->connection->node->config.logging.bulk_pull_logging ())
			{
				this_l->connection->node->logger.try_log (boost::str (boost::format ("Error sending bulk pull request to %1%: to %2%") % ec.message () % this_l->connection->channel->to_string ()));
			}
			this_l->connection->node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::bulk_pull_request_failure, nano::stat::dir::in);
		}
	},
	false); // is bootstrap traffic is_dropable false
}
// Read the single type byte that prefixes each block in the bulk pull stream,
// then dispatch to received_type () to read the type-specific payload.
void nano::bulk_pull_client::receive_block ()
{
	auto this_l (shared_from_this ());
	connection->channel->socket->async_read (connection->receive_buffer, 1, [this_l](boost::system::error_code const & ec, size_t size_a) {
		if (!ec)
		{
			this_l->received_type ();
		}
		else
		{
			if (this_l->connection->node->config.logging.bulk_pull_logging ())
			{
				this_l->connection->node->logger.try_log (boost::str (boost::format ("Error receiving block type: %1%") % ec.message ()));
			}
			this_l->connection->node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::bulk_pull_receive_block_failure, nano::stat::dir::in);
		}
	});
}
// Dispatch on the block type byte just read: schedule a read of the matching
// serialized block size, or — on not_a_block, the stream terminator — return the
// connection to the idle pool if the pull completed where expected.
void nano::bulk_pull_client::received_type ()
{
	auto this_l (shared_from_this ());
	nano::block_type type (static_cast<nano::block_type> (connection->receive_buffer->data ()[0]));
	switch (type)
	{
		case nano::block_type::send:
		{
			connection->channel->socket->async_read (connection->receive_buffer, nano::send_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) {
				this_l->received_block (ec, size_a, type);
			});
			break;
		}
		case nano::block_type::receive:
		{
			connection->channel->socket->async_read (connection->receive_buffer, nano::receive_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) {
				this_l->received_block (ec, size_a, type);
			});
			break;
		}
		case nano::block_type::open:
		{
			connection->channel->socket->async_read (connection->receive_buffer, nano::open_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) {
				this_l->received_block (ec, size_a, type);
			});
			break;
		}
		case nano::block_type::change:
		{
			connection->channel->socket->async_read (connection->receive_buffer, nano::change_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) {
				this_l->received_block (ec, size_a, type);
			});
			break;
		}
		case nano::block_type::state:
		{
			connection->channel->socket->async_read (connection->receive_buffer, nano::state_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) {
				this_l->received_block (ec, size_a, type);
			});
			break;
		}
		case nano::block_type::not_a_block:
		{
			// Avoid re-using slow peers, or peers that sent the wrong blocks.
			if (!connection->pending_stop && expected == pull.end)
			{
				connection->attempt->pool_connection (connection);
			}
			break;
		}
		default:
		{
			// Unknown type byte: log and drop the receive chain for this connection.
			if (connection->node->config.logging.network_packet_logging ())
			{
				connection->node->logger.try_log (boost::str (boost::format ("Unknown type received as block type: %1%") % static_cast<int> (type)));
			}
			break;
		}
	}
}
// Deserialize and validate one pulled block, track whether it continues the
// expected chain, feed it to the attempt's block processor, and either continue
// receiving or stop (returning the connection to the pool when the pull finished
// on an expected block).
void nano::bulk_pull_client::received_block (boost::system::error_code const & ec, size_t size_a, nano::block_type type_a)
{
	if (!ec)
	{
		nano::bufferstream stream (connection->receive_buffer->data (), size_a);
		std::shared_ptr<nano::block> block (nano::deserialize_block (stream, type_a));
		// Reject malformed blocks and blocks with invalid proof-of-work.
		if (block != nullptr && !nano::work_validate (*block))
		{
			auto hash (block->hash ());
			if (connection->node->config.logging.bulk_pull_logging ())
			{
				std::string block_l;
				block->serialize_json (block_l);
				connection->node->logger.try_log (boost::str (boost::format ("Pulled block %1% %2%") % hash.to_string () % block_l));
			}
			// Is block expected?
			bool block_expected (false);
			if (hash == expected)
			{
				// Blocks arrive newest-first; the next expected hash is this block's predecessor.
				expected = block->previous ();
				block_expected = true;
			}
			else
			{
				unexpected_count++;
			}
			if (total_blocks == 0 && block_expected)
			{
				known_account = block->account ();
			}
			if (connection->block_count++ == 0)
			{
				connection->start_time = std::chrono::steady_clock::now ();
			}
			connection->attempt->total_blocks++;
			total_blocks++;
			bool stop_pull (connection->attempt->process_block (block, known_account, total_blocks, block_expected));
			if (!stop_pull && !connection->hard_stop.load ())
			{
				/* Process block in lazy pull if not stopped
				Stop usual pull request with unexpected block & more than 16k blocks processed
				to prevent spam */
				if (connection->attempt->mode != nano::bootstrap_mode::legacy || unexpected_count < 16384)
				{
					receive_block ();
				}
			}
			else if (stop_pull && block_expected)
			{
				// Mark the pull finished so the destructor doesn't requeue it.
				expected = pull.end;
				connection->attempt->pool_connection (connection);
			}
			if (stop_pull)
			{
				connection->attempt->lazy_stopped++;
			}
		}
		else
		{
			if (connection->node->config.logging.bulk_pull_logging ())
			{
				connection->node->logger.try_log ("Error deserializing block received from pull request");
			}
			connection->node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::bulk_pull_deserialize_receive_block, nano::stat::dir::in);
		}
	}
	else
	{
		if (connection->node->config.logging.bulk_pull_logging ())
		{
			connection->node->logger.try_log (boost::str (boost::format ("Error bulk receiving block: %1%") % ec.message ()));
		}
		connection->node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::bulk_pull_receive_block_failure, nano::stat::dir::in);
	}
}
// Construct a bulk push client bound to an established bootstrap connection.
nano::bulk_push_client::bulk_push_client (std::shared_ptr<nano::bootstrap_client> const & connection_a) :
connection (connection_a)
{
}
// Out-of-line empty destructor (keeps destruction in this translation unit).
nano::bulk_push_client::~bulk_push_client ()
{
}
// Send the bulk_push protocol header; on success begin pushing blocks from the
// attempt's queued targets under a fresh read transaction.
void nano::bulk_push_client::start ()
{
	nano::bulk_push message;
	auto this_l (shared_from_this ());
	connection->channel->send (
	message, [this_l](boost::system::error_code const & ec, size_t size_a) {
		auto transaction (this_l->connection->node->store.tx_begin_read ());
		if (!ec)
		{
			this_l->push (transaction);
		}
		else
		{
			if (this_l->connection->node->config.logging.bulk_pull_logging ())
			{
				this_l->connection->node->logger.try_log (boost::str (boost::format ("Unable to send bulk_push request: %1%") % ec.message ()));
			}
		}
	},
	false); // is bootstrap traffic is_dropable false
}
// Select and send the next block to push. Walks the current [first, second]
// target range backwards from the head; when a range is exhausted (or its block
// is missing locally) the next target is popped from the attempt's queue. Sends
// the not_a_block terminator once all targets are done.
void nano::bulk_push_client::push (nano::transaction const & transaction_a)
{
	std::shared_ptr<nano::block> block;
	bool finished (false);
	while (block == nullptr && !finished)
	{
		if (current_target.first.is_zero () || current_target.first == current_target.second)
		{
			std::lock_guard<std::mutex> guard (connection->attempt->mutex);
			if (!connection->attempt->bulk_push_targets.empty ())
			{
				current_target = connection->attempt->bulk_push_targets.back ();
				connection->attempt->bulk_push_targets.pop_back ();
			}
			else
			{
				finished = true;
			}
		}
		if (!finished)
		{
			block = connection->node->store.block_get (transaction_a, current_target.first);
			if (block == nullptr)
			{
				// Block vanished locally (e.g. rolled back); abandon this range.
				current_target.first = nano::block_hash (0);
			}
			else
			{
				if (connection->node->config.logging.bulk_pull_logging ())
				{
					connection->node->logger.try_log ("Bulk pushing range ", current_target.first.to_string (), " down to ", current_target.second.to_string ());
				}
			}
		}
	}
	if (finished)
	{
		send_finished ();
	}
	else
	{
		// Advance towards the range's lower bound before the async send resumes push ().
		current_target.first = block->previous ();
		push_block (*block);
	}
}
// Send the not_a_block terminator and fulfil `promise` with false (success).
// set_value may throw future_error if stop () already satisfied the promise.
void nano::bulk_push_client::send_finished ()
{
	auto buffer (std::make_shared<std::vector<uint8_t>> ());
	buffer->push_back (static_cast<uint8_t> (nano::block_type::not_a_block));
	auto this_l (shared_from_this ());
	connection->channel->send_buffer (buffer, nano::stat::detail::all, [this_l](boost::system::error_code const & ec, size_t size_a) {
		try
		{
			this_l->promise.set_value (false);
		}
		catch (std::future_error &)
		{
		}
	});
}
// Serialize one block and send it; on success continue the push loop with a new
// read transaction. Errors end the push silently apart from optional logging.
void nano::bulk_push_client::push_block (nano::block const & block_a)
{
	auto buffer (std::make_shared<std::vector<uint8_t>> ());
	{
		// Scope the vectorstream so the buffer is complete before the async send.
		nano::vectorstream stream (*buffer);
		nano::serialize_block (stream, block_a);
	}
	auto this_l (shared_from_this ());
	connection->channel->send_buffer (buffer, nano::stat::detail::all, [this_l](boost::system::error_code const & ec, size_t size_a) {
		if (!ec)
		{
			auto transaction (this_l->connection->node->store.tx_begin_read ());
			this_l->push (transaction);
		}
		else
		{
			if (this_l->connection->node->config.logging.bulk_pull_logging ())
			{
				this_l->connection->node->logger.try_log (boost::str (boost::format ("Error sending block during bulk push: %1%") % ec.message ()));
			}
		}
	});
}
// Construct a client pulling pending entries for a single wallet account and
// wake any threads waiting on the owning attempt.
nano::bulk_pull_account_client::bulk_pull_account_client (std::shared_ptr<nano::bootstrap_client> connection_a, nano::account const & account_a) :
connection (connection_a),
account (account_a),
total_blocks (0)
{
	connection->attempt->condition.notify_all ();
}
// Decrement the attempt's in-flight pull counter and notify waiters on destruction.
nano::bulk_pull_account_client::~bulk_pull_account_client ()
{
	{
		std::lock_guard<std::mutex> mutex (connection->attempt->mutex);
		--connection->attempt->pulling;
	}
	connection->attempt->condition.notify_all ();
}
// Send a bulk_pull_account request (hash + amount flavour) filtered by the
// node's receive minimum; on success start receiving pending entries, otherwise
// requeue the account for another peer.
void nano::bulk_pull_account_client::request ()
{
	nano::bulk_pull_account req;
	req.account = account;
	req.minimum_amount = connection->node->config.receive_minimum;
	req.flags = nano::bulk_pull_account_flags::pending_hash_and_amount;
	if (connection->node->config.logging.bulk_pull_logging ())
	{
		std::unique_lock<std::mutex> lock (connection->attempt->mutex);
		connection->node->logger.try_log (boost::str (boost::format ("Requesting pending for account %1% from %2%. %3% accounts in queue") % req.account.to_account () % connection->channel->to_string () % connection->attempt->wallet_accounts.size ()));
	}
	else if (connection->node->config.logging.network_logging () && connection->attempt->should_log ())
	{
		std::unique_lock<std::mutex> lock (connection->attempt->mutex);
		connection->node->logger.always_log (boost::str (boost::format ("%1% accounts in pull queue") % connection->attempt->wallet_accounts.size ()));
	}
	auto this_l (shared_from_this ());
	connection->channel->send (
	req, [this_l](boost::system::error_code const & ec, size_t size_a) {
		if (!ec)
		{
			this_l->receive_pending ();
		}
		else
		{
			// Give the account back to the queue so another connection can retry it.
			this_l->connection->attempt->requeue_pending (this_l->account);
			if (this_l->connection->node->config.logging.bulk_pull_logging ())
			{
				this_l->connection->node->logger.try_log (boost::str (boost::format ("Error starting bulk pull request to %1%: to %2%") % ec.message () % this_l->connection->channel->to_string ()));
			}
			this_l->connection->node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::bulk_pull_error_starting_request, nano::stat::dir::in);
		}
	},
	false); // is bootstrap traffic is_dropable false
}
// Read one (pending hash, amount) pair. A zero hash after the first entry ends
// the stream and pools the connection; entries meeting the receive minimum whose
// block we don't hold locally seed a lazy bootstrap. Any error requeues the account.
void nano::bulk_pull_account_client::receive_pending ()
{
	auto this_l (shared_from_this ());
	size_t size_l (sizeof (nano::uint256_union) + sizeof (nano::uint128_union));
	connection->channel->socket->async_read (connection->receive_buffer, size_l, [this_l, size_l](boost::system::error_code const & ec, size_t size_a) {
		// An issue with asio is that sometimes, instead of reporting a bad file descriptor during disconnect,
		// we simply get a size of 0.
		if (size_a == size_l)
		{
			if (!ec)
			{
				nano::block_hash pending;
				nano::bufferstream frontier_stream (this_l->connection->receive_buffer->data (), sizeof (nano::uint256_union));
				auto error1 (nano::try_read (frontier_stream, pending));
				(void)error1;
				assert (!error1);
				nano::amount balance;
				nano::bufferstream balance_stream (this_l->connection->receive_buffer->data () + sizeof (nano::uint256_union), sizeof (nano::uint128_union));
				auto error2 (nano::try_read (balance_stream, balance));
				(void)error2;
				assert (!error2);
				if (this_l->total_blocks == 0 || !pending.is_zero ())
				{
					if (this_l->total_blocks == 0 || balance.number () >= this_l->connection->node->config.receive_minimum.number ())
					{
						this_l->total_blocks++;
						{
							if (!pending.is_zero ())
							{
								auto transaction (this_l->connection->node->store.tx_begin_read ());
								if (!this_l->connection->node->store.block_exists (transaction, pending))
								{
									// Unknown pending source block: pull it lazily.
									this_l->connection->attempt->lazy_start (pending);
								}
							}
						}
						this_l->receive_pending ();
					}
					else
					{
						// Below-minimum entry after the first suggests a misbehaving peer; retry elsewhere.
						this_l->connection->attempt->requeue_pending (this_l->account);
					}
				}
				else
				{
					// Zero hash terminator: account finished, connection is reusable.
					this_l->connection->attempt->pool_connection (this_l->connection);
				}
			}
			else
			{
				this_l->connection->attempt->requeue_pending (this_l->account);
				if (this_l->connection->node->config.logging.network_logging ())
				{
					this_l->connection->node->logger.try_log (boost::str (boost::format ("Error while receiving bulk pull account frontier %1%") % ec.message ()));
				}
			}
		}
		else
		{
			this_l->connection->attempt->requeue_pending (this_l->account);
			if (this_l->connection->node->config.logging.network_message_logging ())
			{
				this_l->connection->node->logger.try_log (boost::str (boost::format ("Invalid size: expected %1%, got %2%") % size_l % size_a));
			}
		}
	});
}
// Value constructor; `head_original` remembers the starting head so a resumed
// (cached) pull can be distinguished from a fresh one.
nano::pull_info::pull_info (nano::account const & account_a, nano::block_hash const & head_a, nano::block_hash const & end_a, count_t count_a) :
account (account_a),
head (head_a),
head_original (head_a),
end (end_a),
count (count_a)
{
}
// Begin a bootstrap attempt in the given mode (legacy / lazy / wallet_lazy) and
// notify observers that bootstrapping is now in progress.
nano::bootstrap_attempt::bootstrap_attempt (std::shared_ptr<nano::node> node_a, nano::bootstrap_mode mode_a) :
next_log (std::chrono::steady_clock::now ()),
connections (0),
pulling (0),
node (node_a),
account_count (0),
total_blocks (0),
runs_count (0),
stopped (false),
mode (mode_a),
lazy_stopped (0)
{
	node->logger.always_log ("Starting bootstrap attempt");
	node->bootstrap_initiator.notify_listeners (true);
}
// Notify observers that this bootstrap attempt is over.
nano::bootstrap_attempt::~bootstrap_attempt ()
{
	node->logger.always_log ("Exiting bootstrap attempt");
	node->bootstrap_initiator.notify_listeners (false);
}
// Rate limiter for periodic progress logging: returns true at most once every
// 15 seconds; thread-safe.
bool nano::bootstrap_attempt::should_log ()
{
	std::lock_guard<std::mutex> lock (mutex);
	auto const now (std::chrono::steady_clock::now ());
	bool const due (next_log < now);
	if (due)
	{
		next_log = now + std::chrono::seconds (15);
	}
	return due;
}
// Run a frontier request on one idle connection, blocking (with the attempt lock
// released) until its promise resolves. Returns true on failure; on failure the
// partially-built pull list is discarded.
bool nano::bootstrap_attempt::request_frontier (std::unique_lock<std::mutex> & lock_a)
{
	auto result (true);
	auto connection_l (connection (lock_a));
	connection_frontier_request = connection_l;
	if (connection_l)
	{
		std::future<bool> future;
		{
			auto client (std::make_shared<nano::frontier_req_client> (connection_l));
			client->run ();
			frontiers = client;
			future = client->promise.get_future ();
		}
		lock_a.unlock ();
		result = consume_future (future); // This is out of scope of `client' so when the last reference via boost::asio::io_context is lost and the client is destroyed, the future throws an exception.
		lock_a.lock ();
		if (result)
		{
			pulls.clear ();
		}
		if (node->config.logging.network_logging ())
		{
			if (!result)
			{
				node->logger.try_log (boost::str (boost::format ("Completed frontier request, %1% out of sync accounts according to %2%") % pulls.size () % connection_l->channel->to_string ()));
			}
			else
			{
				node->stats.inc (nano::stat::type::error, nano::stat::detail::frontier_req, nano::stat::dir::out);
			}
		}
	}
	return result;
}
// Pop the next pull from the queue and start a bulk_pull_client for it on an idle
// connection. In lazy modes, pulls whose head is already known/processed are
// skipped. Requires `lock_a` held on entry (attempt mutex).
void nano::bootstrap_attempt::request_pull (std::unique_lock<std::mutex> & lock_a)
{
	auto connection_l (connection (lock_a));
	if (connection_l)
	{
		auto pull (pulls.front ());
		pulls.pop_front ();
		if (mode != nano::bootstrap_mode::legacy)
		{
			// Check if pull is obsolete (head was processed)
			std::unique_lock<std::mutex> lock (lazy_mutex);
			auto transaction (node->store.tx_begin_read ());
			while (!pulls.empty () && !pull.head.is_zero () && (lazy_blocks.find (pull.head) != lazy_blocks.end () || node->store.block_exists (transaction, pull.head)))
			{
				pull = pulls.front ();
				pulls.pop_front ();
			}
		}
		++pulling;
		// The bulk_pull_client destructor attempt to requeue_pull which can cause a deadlock if this is the last reference
		// Dispatch request in an external thread in case it needs to be destroyed
		node->background ([connection_l, pull]() {
			auto client (std::make_shared<nano::bulk_pull_client> (connection_l, pull));
			client->request ();
		});
	}
}
// Run the bulk push phase on the same connection the frontier request used,
// blocking (lock released) until the push client's promise resolves.
void nano::bootstrap_attempt::request_push (std::unique_lock<std::mutex> & lock_a)
{
	bool error (false);
	if (auto connection_shared = connection_frontier_request.lock ())
	{
		std::future<bool> future;
		{
			auto client (std::make_shared<nano::bulk_push_client> (connection_shared));
			client->start ();
			push = client;
			future = client->promise.get_future ();
		}
		lock_a.unlock ();
		error = consume_future (future); // This is out of scope of `client' so when the last reference via boost::asio::io_context is lost and the client is destroyed, the future throws an exception.
		lock_a.lock ();
	}
	if (node->config.logging.network_logging ())
	{
		node->logger.try_log ("Exiting bulk push client");
		if (error)
		{
			node->logger.try_log ("Bulk push client failed");
		}
	}
}
// True while the attempt is running and there is pull work left (queued pulls or
// pulls in flight). Precondition: caller holds the attempt mutex (asserted).
bool nano::bootstrap_attempt::still_pulling ()
{
	assert (!mutex.try_lock ());
	if (stopped)
	{
		return false;
	}
	return !pulls.empty () || pulling > 0;
}
// Main loop of a legacy bootstrap attempt: fetch frontiers (retrying until
// success or stop), shuffle pulls, drain the pull queue while flushing the block
// processor, then run the push phase and optionally chain into wallet-lazy or
// lazy bootstrap follow-up runs.
void nano::bootstrap_attempt::run ()
{
	assert (!node->flags.disable_legacy_bootstrap);
	populate_connections ();
	std::unique_lock<std::mutex> lock (mutex);
	auto frontier_failure (true);
	while (!stopped && frontier_failure)
	{
		frontier_failure = request_frontier (lock);
	}
	// Shuffle pulls.
	release_assert (std::numeric_limits<CryptoPP::word32>::max () > pulls.size ());
	if (!pulls.empty ())
	{
		// Fisher-Yates shuffle so pull order (and peer load) is randomized.
		for (auto i = static_cast<CryptoPP::word32> (pulls.size () - 1); i > 0; --i)
		{
			auto k = nano::random_pool::generate_word32 (0, i);
			std::swap (pulls[i], pulls[k]);
		}
	}
	while (still_pulling ())
	{
		while (still_pulling ())
		{
			if (!pulls.empty ())
			{
				if (!node->block_processor.full ())
				{
					request_pull (lock);
				}
				else
				{
					// Back off while the block processor catches up.
					condition.wait_for (lock, std::chrono::seconds (15));
				}
			}
			else
			{
				condition.wait (lock);
			}
		}
		// Flushing may resolve forks which can add more pulls
		node->logger.try_log ("Flushing unchecked blocks");
		lock.unlock ();
		node->block_processor.flush ();
		lock.lock ();
		node->logger.try_log ("Finished flushing unchecked blocks");
	}
	if (!stopped)
	{
		node->logger.try_log ("Completed pulls");
		request_push (lock);
		runs_count++;
		// Start wallet lazy bootstrap if required
		if (!wallet_accounts.empty () && !node->flags.disable_wallet_bootstrap)
		{
			lock.unlock ();
			mode = nano::bootstrap_mode::wallet_lazy;
			wallet_run ();
			lock.lock ();
		}
		// Start lazy bootstrap if some lazy keys were inserted
		else if (runs_count < 3 && !lazy_finished () && !node->flags.disable_lazy_bootstrap)
		{
			lock.unlock ();
			mode = nano::bootstrap_mode::lazy;
			lazy_run ();
			lock.lock ();
		}
		if (!node->flags.disable_unchecked_cleanup)
		{
			node->unchecked_cleanup ();
		}
	}
	stopped = true;
	condition.notify_all ();
	idle.clear ();
}
// Block (releasing `lock_a`) until an idle connection is available or the attempt
// is stopped; returns nullptr when stopped with no idle connection.
std::shared_ptr<nano::bootstrap_client> nano::bootstrap_attempt::connection (std::unique_lock<std::mutex> & lock_a)
{
	while (!stopped && idle.empty ())
	{
		condition.wait (lock_a);
	}
	std::shared_ptr<nano::bootstrap_client> result;
	if (!idle.empty ())
	{
		result = idle.back ();
		idle.pop_back ();
	}
	return result;
}
// Wait on a client's completion future. A broken promise (the client was
// destroyed before fulfilling it) is reported as failure (true), matching the
// future's error convention used throughout bootstrap.
bool nano::bootstrap_attempt::consume_future (std::future<bool> & future_a)
{
	try
	{
		return future_a.get ();
	}
	catch (std::future_error &)
	{
		return true;
	}
}
// Comparator ordering bootstrap clients by descending block rate, so a
// priority_queue surfaces the slowest client at the top (see populate_connections).
struct block_rate_cmp
{
	bool operator() (const std::shared_ptr<nano::bootstrap_client> & lhs, const std::shared_ptr<nano::bootstrap_client> & rhs) const
	{
		return lhs->block_rate () > rhs->block_rate ();
	}
};
// Compute how many bootstrap connections to aim for, scaling linearly from the
// configured base up to the configured maximum as the remaining pull count
// approaches bootstrap_connection_scale_target_blocks. Always at least 1.
unsigned nano::bootstrap_attempt::target_connections (size_t pulls_remaining)
{
	auto const base (node->config.bootstrap_connections);
	auto const ceiling (node->config.bootstrap_connections_max);
	if (base >= ceiling)
	{
		return std::max (1U, ceiling);
	}
	// Only scale up to bootstrap_connections_max for large pulls.
	double const ratio ((double)pulls_remaining / bootstrap_connection_scale_target_blocks);
	double const step (std::min (1.0, std::max (0.0, ratio)));
	double const scaled ((double)base + (double)(ceiling - base) * step);
	return std::max (1U, (unsigned)(scaled + 0.5f));
}
// Periodic connection maintenance (reschedules itself every second while the
// attempt runs): prunes expired clients, force-stops peers below the minimum
// block rate, drops the slowest peers when most connection slots are active,
// and opens new connections toward target_connections () from random peers.
// Fix: reuse `elapsed_sec` for the warmup check instead of calling
// client->elapsed_seconds () a second time — the second call could return a
// slightly later value than the one `blocks_per_sec` was derived from, making
// the two checks inconsistent.
void nano::bootstrap_attempt::populate_connections ()
{
	double rate_sum = 0.0;
	size_t num_pulls = 0;
	std::priority_queue<std::shared_ptr<nano::bootstrap_client>, std::vector<std::shared_ptr<nano::bootstrap_client>>, block_rate_cmp> sorted_connections;
	std::unordered_set<nano::tcp_endpoint> endpoints;
	{
		std::unique_lock<std::mutex> lock (mutex);
		num_pulls = pulls.size ();
		std::deque<std::weak_ptr<nano::bootstrap_client>> new_clients;
		for (auto & c : clients)
		{
			if (auto client = c.lock ())
			{
				new_clients.push_back (client);
				endpoints.insert (client->channel->socket->remote_endpoint ());
				double elapsed_sec = client->elapsed_seconds ();
				auto blocks_per_sec = client->block_rate ();
				rate_sum += blocks_per_sec;
				// Use the already-sampled elapsed_sec so warmup and rate checks agree.
				if (elapsed_sec > bootstrap_connection_warmup_time_sec && client->block_count > 0)
				{
					sorted_connections.push (client);
				}
				// Force-stop the slowest peers, since they can take the whole bootstrap hostage by dribbling out blocks on the last remaining pull.
				// This is ~1.5kilobits/sec.
				if (elapsed_sec > bootstrap_minimum_termination_time_sec && blocks_per_sec < bootstrap_minimum_blocks_per_sec)
				{
					if (node->config.logging.bulk_pull_logging ())
					{
						node->logger.try_log (boost::str (boost::format ("Stopping slow peer %1% (elapsed sec %2%s > %3%s and %4% blocks per second < %5%)") % client->channel->to_string () % elapsed_sec % bootstrap_minimum_termination_time_sec % blocks_per_sec % bootstrap_minimum_blocks_per_sec));
					}
					client->stop (true);
				}
			}
		}
		// Cleanup expired clients
		clients.swap (new_clients);
	}
	auto target = target_connections (num_pulls);
	// We only want to drop slow peers when more than 2/3 are active. 2/3 because 1/2 is too aggressive, and 100% rarely happens.
	// Probably needs more tuning.
	if (sorted_connections.size () >= (target * 2) / 3 && target >= 4)
	{
		// 4 -> 1, 8 -> 2, 16 -> 4, arbitrary, but seems to work well.
		auto drop = (int)roundf (sqrtf ((float)target - 2.0f));
		if (node->config.logging.bulk_pull_logging ())
		{
			node->logger.try_log (boost::str (boost::format ("Dropping %1% bulk pull peers, target connections %2%") % drop % target));
		}
		for (int i = 0; i < drop; i++)
		{
			auto client = sorted_connections.top ();
			if (node->config.logging.bulk_pull_logging ())
			{
				node->logger.try_log (boost::str (boost::format ("Dropping peer with block rate %1%, block count %2% (%3%) ") % client->block_rate () % client->block_count % client->channel->to_string ()));
			}
			client->stop (false);
			sorted_connections.pop ();
		}
	}
	if (node->config.logging.bulk_pull_logging ())
	{
		std::unique_lock<std::mutex> lock (mutex);
		node->logger.try_log (boost::str (boost::format ("Bulk pull connections: %1%, rate: %2% blocks/sec, remaining account pulls: %3%, total blocks: %4%") % connections.load () % (int)rate_sum % pulls.size () % (int)total_blocks.load ()));
	}
	if (connections < target)
	{
		auto delta = std::min ((target - connections) * 2, bootstrap_max_new_connections);
		// TODO - tune this better
		// Not many peers respond, need to try to make more connections than we need.
		for (auto i = 0u; i < delta; i++)
		{
			auto endpoint (node->network.bootstrap_peer ());
			if (endpoint != nano::tcp_endpoint (boost::asio::ip::address_v6::any (), 0) && endpoints.find (endpoint) == endpoints.end ())
			{
				connect_client (endpoint);
				std::lock_guard<std::mutex> lock (mutex);
				endpoints.insert (endpoint);
			}
			else if (connections == 0)
			{
				node->logger.try_log (boost::str (boost::format ("Bootstrap stopped because there are no peers")));
				stopped = true;
				condition.notify_all ();
			}
		}
	}
	if (!stopped)
	{
		std::weak_ptr<nano::bootstrap_attempt> this_w (shared_from_this ());
		node->alarm.add (std::chrono::steady_clock::now () + std::chrono::seconds (1), [this_w]() {
			if (auto this_l = this_w.lock ())
			{
				this_l->populate_connections ();
			}
		});
	}
}
// Convert a UDP-style endpoint to a TCP endpoint and initiate a bootstrap connection.
void nano::bootstrap_attempt::add_connection (nano::endpoint const & endpoint_a)
{
	connect_client (nano::tcp_endpoint (endpoint_a.address (), endpoint_a.port ()));
}
// Asynchronously connect to a peer; on success wrap the socket in a bootstrap
// client and pool it as idle. `connections` counts in-flight attempts and is
// decremented when the connect handler completes, success or not.
void nano::bootstrap_attempt::connect_client (nano::tcp_endpoint const & endpoint_a)
{
	++connections;
	auto socket (std::make_shared<nano::socket> (node));
	auto this_l (shared_from_this ());
	socket->async_connect (endpoint_a,
	[this_l, socket, endpoint_a](boost::system::error_code const & ec) {
		if (!ec)
		{
			if (this_l->node->config.logging.bulk_pull_logging ())
			{
				this_l->node->logger.try_log (boost::str (boost::format ("Connection established to %1%") % endpoint_a));
			}
			auto client (std::make_shared<nano::bootstrap_client> (this_l->node, this_l, std::make_shared<nano::transport::channel_tcp> (*this_l->node, socket)));
			this_l->pool_connection (client);
		}
		else
		{
			if (this_l->node->config.logging.network_logging ())
			{
				// Expected/noisy failure modes are deliberately not logged.
				switch (ec.value ())
				{
					default:
						this_l->node->logger.try_log (boost::str (boost::format ("Error initiating bootstrap connection to %1%: %2%") % endpoint_a % ec.message ()));
						break;
					case boost::system::errc::connection_refused:
					case boost::system::errc::operation_canceled:
					case boost::system::errc::timed_out:
					case 995: //Windows The I/O operation has been aborted because of either a thread exit or an application request
					case 10061: //Windows No connection could be made because the target machine actively refused it
						break;
				}
			}
		}
		--this_l->connections;
	});
}
// Return a connection to the idle pool (unless the attempt or the client is
// stopping), restart its idle timer, and wake threads waiting for a connection.
void nano::bootstrap_attempt::pool_connection (std::shared_ptr<nano::bootstrap_client> client_a)
{
	std::lock_guard<std::mutex> lock (mutex);
	if (!stopped && !client_a->pending_stop)
	{
		// Idle bootstrap client socket
		client_a->channel->socket->start_timer (node->network_params.node.idle_timeout);
		// Push into idle deque
		idle.push_front (client_a);
	}
	condition.notify_all ();
}
// Abort the attempt: mark stopped, wake all waiters, close every live client
// socket, and satisfy the frontier/push promises with failure so blocked
// request_frontier/request_push calls return.
void nano::bootstrap_attempt::stop ()
{
	std::lock_guard<std::mutex> lock (mutex);
	stopped = true;
	condition.notify_all ();
	for (auto i : clients)
	{
		if (auto client = i.lock ())
		{
			client->channel->socket->close ();
		}
	}
	if (auto i = frontiers.lock ())
	{
		try
		{
			i->promise.set_value (true);
		}
		catch (std::future_error &)
		{
			// Promise already satisfied by the client itself; nothing to do.
		}
	}
	if (auto i = push.lock ())
	{
		try
		{
			i->promise.set_value (true);
		}
		catch (std::future_error &)
		{
		}
	}
}
// Queue a pull request (after letting the pull cache adjust its head for any
// previously-recorded progress) and wake a worker to service it.
void nano::bootstrap_attempt::add_pull (nano::pull_info const & pull_a)
{
	nano::pull_info pull (pull_a);
	node->bootstrap_initiator.cache.update_pull (pull);
	{
		std::lock_guard<std::mutex> lock (mutex);
		pulls.push_back (pull);
	}
	condition.notify_all ();
}
// Put a failed pull back on the queue. The retry budget grows with the number of
// blocks already processed; lazy mode gets extra retries (pushed to the back,
// attempts bumped again to consume budget faster). Once exhausted, the pull is
// recorded in the cache so a later attempt can resume from its progress.
void nano::bootstrap_attempt::requeue_pull (nano::pull_info const & pull_a)
{
	auto pull (pull_a);
	if (++pull.attempts < (bootstrap_frontier_retry_limit + (pull.processed / 10000)))
	{
		std::lock_guard<std::mutex> lock (mutex);
		pulls.push_front (pull);
		condition.notify_all ();
	}
	else if (mode == nano::bootstrap_mode::lazy)
	{
		{
			// Retry for lazy pulls (not weak state block link assumptions)
			std::lock_guard<std::mutex> lock (mutex);
			pull.attempts++;
			pulls.push_back (pull);
		}
		condition.notify_all ();
	}
	else
	{
		if (node->config.logging.bulk_pull_logging ())
		{
			node->logger.try_log (boost::str (boost::format ("Failed to pull account %1% down to %2% after %3% attempts and %4% blocks processed") % pull.account.to_account () % pull.end.to_string () % pull.attempts % pull.processed));
		}
		node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::bulk_pull_failed_account, nano::stat::dir::in);
		node->bootstrap_initiator.cache.add (pull);
	}
}
void nano::bootstrap_attempt::add_bulk_push_target (nano::block_hash const & head, nano::block_hash const & end)
{
	// Record a (head, end) hash range to be sent to a peer during the bulk push phase
	std::lock_guard<std::mutex> lock (mutex);
	bulk_push_targets.emplace_back (head, end);
}
void nano::bootstrap_attempt::lazy_start (nano::block_hash const & hash_a)
{
	std::unique_lock<std::mutex> lock (lazy_mutex);
	// Add start blocks, limit 1024 (32k with disabled legacy bootstrap)
	size_t max_keys (node->flags.disable_legacy_bootstrap ? 32 * 1024 : 1024);
	bool below_limit (lazy_keys.size () < max_keys);
	bool new_key (lazy_keys.find (hash_a) == lazy_keys.end ());
	bool unprocessed (lazy_blocks.find (hash_a) == lazy_blocks.end ());
	if (below_limit && new_key && unprocessed)
	{
		lazy_keys.insert (hash_a);
		lazy_pulls.push_back (hash_a);
	}
}
void nano::bootstrap_attempt::lazy_add (nano::block_hash const & hash_a)
{
	// Caller must already hold lazy_mutex
	assert (!lazy_mutex.try_lock ());
	// Queue only blocks not yet processed during this lazy session
	if (lazy_blocks.count (hash_a) == 0)
	{
		lazy_pulls.push_back (hash_a);
	}
}
void nano::bootstrap_attempt::lazy_pull_flush ()
{
	// Caller must hold the attempt mutex; lazy state is guarded by its own mutex
	assert (!mutex.try_lock ());
	std::unique_lock<std::mutex> lazy_lock (lazy_mutex);
	auto transaction (node->store.tx_begin_read ());
	for (auto & candidate : lazy_pulls)
	{
		// Skip hashes that arrived in the processed set or the ledger in the meantime
		bool already_processed (lazy_blocks.find (candidate) != lazy_blocks.end ());
		if (!already_processed && !node->store.block_exists (transaction, candidate))
		{
			assert (node->network_params.bootstrap.lazy_max_pull_blocks <= std::numeric_limits<nano::pull_info::count_t>::max ());
			pulls.push_back (nano::pull_info (candidate, candidate, nano::block_hash (0), static_cast<nano::pull_info::count_t> (node->network_params.bootstrap.lazy_max_pull_blocks)));
		}
	}
	lazy_pulls.clear ();
}
bool nano::bootstrap_attempt::lazy_finished ()
{
	// Returns true when every lazy key is present in the ledger (or when no
	// lazy pulls remain pending; see below).
	auto transaction (node->store.tx_begin_read ());
	std::unique_lock<std::mutex> lock (lazy_mutex);
	bool result (true);
	auto it (lazy_keys.begin ());
	while (it != lazy_keys.end () && !stopped)
	{
		if (!node->store.block_exists (transaction, *it))
		{
			// Found a key we still do not have: not finished
			result = false;
			break;
		}
		// Key is satisfied; forget it
		it = lazy_keys.erase (it);
	}
	// Finish lazy bootstrap without lazy pulls (in combination with still_pulling ())
	if (!result && lazy_pulls.empty ())
	{
		result = true;
	}
	return result;
}
void nano::bootstrap_attempt::lazy_clear ()
{
	// Reset all lazy bootstrap state. Caller must already hold lazy_mutex.
	assert (!lazy_mutex.try_lock ());
	lazy_stopped = 0;
	lazy_blocks.clear ();
	lazy_keys.clear ();
	lazy_pulls.clear ();
	lazy_state_unknown.clear ();
	lazy_balances.clear ();
}
// Main loop for lazy bootstrap: keeps requesting pulls until all lazy keys are
// satisfied, too many lazy pulls were stopped, or the time budget is exhausted.
// May chain into wallet_run () or legacy run () afterwards.
void nano::bootstrap_attempt::lazy_run ()
{
	assert (!node->flags.disable_lazy_bootstrap);
	populate_connections ();
	auto start_time (std::chrono::steady_clock::now ());
	// Longer budget when legacy bootstrap is disabled, since there is no fallback
	auto max_time (std::chrono::minutes (node->flags.disable_legacy_bootstrap ? 48 * 60 : 30));
	std::unique_lock<std::mutex> lock (mutex);
	while ((still_pulling () || !lazy_finished ()) && lazy_stopped < lazy_max_stopped && std::chrono::steady_clock::now () - start_time < max_time)
	{
		unsigned iterations (0);
		while (still_pulling () && lazy_stopped < lazy_max_stopped && std::chrono::steady_clock::now () - start_time < max_time)
		{
			if (!pulls.empty ())
			{
				// Back off while the block processor queue is saturated
				if (!node->block_processor.full ())
				{
					request_pull (lock);
				}
				else
				{
					condition.wait_for (lock, std::chrono::seconds (15));
				}
			}
			else
			{
				condition.wait (lock);
			}
			++iterations;
			// Flushing lazy pulls
			if (iterations % 100 == 0)
			{
				lazy_pull_flush ();
			}
		}
		// Flushing may resolve forks which can add more pulls
		// Flushing lazy pulls
		lock.unlock ();
		node->block_processor.flush ();
		lock.lock ();
		lazy_pull_flush ();
	}
	if (!stopped)
	{
		node->logger.try_log ("Completed lazy pulls");
		std::unique_lock<std::mutex> lazy_lock (lazy_mutex);
		runs_count++;
		// Start wallet lazy bootstrap if required
		if (!wallet_accounts.empty () && !node->flags.disable_wallet_bootstrap)
		{
			// Both locks must be released before re-entering a run loop
			pulls.clear ();
			lazy_clear ();
			mode = nano::bootstrap_mode::wallet_lazy;
			lock.unlock ();
			lazy_lock.unlock ();
			wallet_run ();
			lock.lock ();
		}
		// Fallback to legacy bootstrap
		else if (runs_count < 3 && !lazy_keys.empty () && !node->flags.disable_legacy_bootstrap)
		{
			pulls.clear ();
			lazy_clear ();
			mode = nano::bootstrap_mode::legacy;
			lock.unlock ();
			lazy_lock.unlock ();
			run ();
			lock.lock ();
		}
	}
	stopped = true;
	condition.notify_all ();
	idle.clear ();
}
// Feed a block received from a peer into the block processor and, in lazy
// mode, discover new dependencies (source / link blocks) to pull next.
// Returns true when the current bulk_pull connection should be dropped.
bool nano::bootstrap_attempt::process_block (std::shared_ptr<nano::block> block_a, nano::account const & known_account_a, uint64_t total_blocks, bool block_expected)
{
	bool stop_pull (false);
	if (mode != nano::bootstrap_mode::legacy && block_expected)
	{
		auto hash (block_a->hash ());
		std::unique_lock<std::mutex> lock (lazy_mutex);
		// Processing new blocks
		if (lazy_blocks.find (hash) == lazy_blocks.end ())
		{
			// Search block in ledger (old)
			auto transaction (node->store.tx_begin_read ());
			if (!node->store.block_exists (transaction, block_a->type (), hash))
			{
				// Balance defaults to max; only send/state blocks carry a usable balance
				nano::uint128_t balance (std::numeric_limits<nano::uint128_t>::max ());
				nano::unchecked_info info (block_a, known_account_a, 0, nano::signature_verification::unknown);
				node->block_processor.add (info);
				// Search for new dependencies
				if (!block_a->source ().is_zero () && !node->store.block_exists (transaction, block_a->source ()))
				{
					lazy_add (block_a->source ());
				}
				else if (block_a->type () == nano::block_type::send)
				{
					// Calculate balance for legacy send blocks
					std::shared_ptr<nano::send_block> block_l (std::static_pointer_cast<nano::send_block> (block_a));
					if (block_l != nullptr)
					{
						balance = block_l->hashables.balance.number ();
					}
				}
				else if (block_a->type () == nano::block_type::state)
				{
					std::shared_ptr<nano::state_block> block_l (std::static_pointer_cast<nano::state_block> (block_a));
					if (block_l != nullptr)
					{
						balance = block_l->hashables.balance.number ();
						nano::block_hash link (block_l->hashables.link);
						// If link is not epoch link or 0. And if block from link unknown
						if (!link.is_zero () && link != node->ledger.epoch_link && lazy_blocks.find (link) == lazy_blocks.end () && !node->store.block_exists (transaction, link))
						{
							nano::block_hash previous (block_l->hashables.previous);
							// If state block previous is 0 then source block required
							if (previous.is_zero ())
							{
								lazy_add (link);
							}
							// In other cases previous block balance required to find out subtype of state block
							else if (node->store.block_exists (transaction, previous))
							{
								// Balance decreased or equal => send subtype => link is a block to pull
								nano::amount prev_balance (node->ledger.balance (transaction, previous));
								if (prev_balance.number () <= balance)
								{
									lazy_add (link);
								}
							}
							// Search balance of already processed previous blocks
							else if (lazy_blocks.find (previous) != lazy_blocks.end ())
							{
								auto previous_balance (lazy_balances.find (previous));
								if (previous_balance != lazy_balances.end ())
								{
									if (previous_balance->second <= balance)
									{
										lazy_add (link);
									}
									lazy_balances.erase (previous_balance);
								}
							}
							// Insert in unknown state blocks if previous wasn't already processed
							else
							{
								lazy_state_unknown.insert (std::make_pair (previous, std::make_pair (link, balance)));
							}
						}
					}
				}
				lazy_blocks.insert (hash);
				// Adding lazy balances
				if (total_blocks == 0)
				{
					lazy_balances.insert (std::make_pair (hash, balance));
				}
				// Removing lazy balances
				if (!block_a->previous ().is_zero () && lazy_balances.find (block_a->previous ()) != lazy_balances.end ())
				{
					lazy_balances.erase (block_a->previous ());
				}
			}
			// Drop bulk_pull if block is already known (ledger)
			else
			{
				// Disabled until server rewrite
				// stop_pull = true;
				// Force drop lazy bootstrap connection for long bulk_pull
				if (total_blocks > node->network_params.bootstrap.lazy_max_pull_blocks)
				{
					stop_pull = true;
				}
			}
			//Search unknown state blocks balances
			auto find_state (lazy_state_unknown.find (hash));
			if (find_state != lazy_state_unknown.end ())
			{
				// This block is the 'previous' of an earlier state block whose
				// subtype could not be determined; decide now using our balance
				auto next_block (find_state->second);
				lazy_state_unknown.erase (hash);
				// Retrieve balance for previous state blocks
				if (block_a->type () == nano::block_type::state)
				{
					std::shared_ptr<nano::state_block> block_l (std::static_pointer_cast<nano::state_block> (block_a));
					if (block_l->hashables.balance.number () <= next_block.second)
					{
						lazy_add (next_block.first);
					}
				}
				// Retrieve balance for previous legacy send blocks
				else if (block_a->type () == nano::block_type::send)
				{
					std::shared_ptr<nano::send_block> block_l (std::static_pointer_cast<nano::send_block> (block_a));
					if (block_l->hashables.balance.number () <= next_block.second)
					{
						lazy_add (next_block.first);
					}
				}
				// Weak assumption for other legacy block types
				else
				{
					// Disabled
				}
			}
		}
		// Drop bulk_pull if block is already known (processed set)
		else
		{
			// Disabled until server rewrite
			// stop_pull = true;
			// Force drop lazy bootstrap connection for long bulk_pull
			if (total_blocks > node->network_params.bootstrap.lazy_max_pull_blocks)
			{
				stop_pull = true;
			}
		}
	}
	else if (mode != nano::bootstrap_mode::legacy)
	{
		// Drop connection with unexpected block for lazy bootstrap
		stop_pull = true;
	}
	else
	{
		// Legacy mode: hand the block straight to the processor, no dependency tracking
		nano::unchecked_info info (block_a, known_account_a, 0, nano::signature_verification::unknown);
		node->block_processor.add (info);
	}
	return stop_pull;
}
void nano::bootstrap_attempt::request_pending (std::unique_lock<std::mutex> & lock_a)
{
	// Take an idle connection and use it to scan the next wallet account for pending entries
	auto connection_l (connection (lock_a));
	if (connection_l == nullptr)
	{
		return;
	}
	auto account (wallet_accounts.front ());
	wallet_accounts.pop_front ();
	++pulling;
	// The bulk_pull_account_client destructor attempt to requeue_pull which can cause a deadlock if this is the last reference
	// Dispatch request in an external thread in case it needs to be destroyed
	node->background ([connection_l, account]() {
		auto client (std::make_shared<nano::bulk_pull_account_client> (connection_l, account));
		client->request ();
	});
}
void nano::bootstrap_attempt::requeue_pending (nano::account const & account_a)
{
	// Put the account back at the head of the wallet queue and wake workers
	std::lock_guard<std::mutex> lock (mutex);
	wallet_accounts.push_front (account_a);
	condition.notify_all ();
}
void nano::bootstrap_attempt::wallet_start (std::deque<nano::account> & accounts_a)
{
	// Exchange the caller's account queue with ours; contents are swapped, not copied
	std::lock_guard<std::mutex> lock (mutex);
	std::swap (wallet_accounts, accounts_a);
}
bool nano::bootstrap_attempt::wallet_finished ()
{
assert (!mutex.try_lock ());
auto running (!stopped);
auto more_accounts (!wallet_accounts.empty ());
auto still_pulling (pulling > 0);
return running && (more_accounts || still_pulling);
}
// Main loop for wallet lazy bootstrap: requests pending entries for each
// wallet account until the queue drains or the 10 minute budget expires.
// May chain into lazy_run () when lazy keys remain unsatisfied.
void nano::bootstrap_attempt::wallet_run ()
{
	assert (!node->flags.disable_wallet_bootstrap);
	populate_connections ();
	auto start_time (std::chrono::steady_clock::now ());
	auto max_time (std::chrono::minutes (10));
	std::unique_lock<std::mutex> lock (mutex);
	// wallet_finished () returns true while work remains (accounts or in-flight pulls)
	while (wallet_finished () && std::chrono::steady_clock::now () - start_time < max_time)
	{
		if (!wallet_accounts.empty ())
		{
			request_pending (lock);
		}
		else
		{
			condition.wait (lock);
		}
	}
	if (!stopped)
	{
		node->logger.try_log ("Completed wallet lazy pulls");
		runs_count++;
		// Start lazy bootstrap if some lazy keys were inserted
		if (!lazy_finished ())
		{
			// Release the attempt mutex before re-entering a run loop
			lock.unlock ();
			lazy_run ();
			lock.lock ();
		}
	}
	stopped = true;
	condition.notify_all ();
	idle.clear ();
}
// Spawns the worker thread which services bootstrap attempts; the thread
// runs run_bootstrap () until stop () is called.
nano::bootstrap_initiator::bootstrap_initiator (nano::node & node_a) :
node (node_a),
stopped (false),
thread ([this]() {
	nano::thread_role::set (nano::thread_role::name::bootstrap_initiator);
	run_bootstrap ();
})
{
}
nano::bootstrap_initiator::~bootstrap_initiator ()
{
	// Ensure the worker thread is signalled and joined before members are destroyed
	stop ();
}
void nano::bootstrap_initiator::bootstrap ()
{
	// Begin a legacy bootstrap attempt unless one is already installed
	std::lock_guard<std::mutex> lock (mutex);
	if (stopped || attempt != nullptr)
	{
		return;
	}
	node.stats.inc (nano::stat::type::bootstrap, nano::stat::detail::initiate, nano::stat::dir::out);
	attempt = std::make_shared<nano::bootstrap_attempt> (node.shared ());
	// Wake the worker thread in run_bootstrap ()
	condition.notify_all ();
}
void nano::bootstrap_initiator::bootstrap (nano::endpoint const & endpoint_a, bool add_to_peers)
{
	// Bootstrap preferring a specific peer, optionally adding it to the peer list
	if (add_to_peers)
	{
		node.network.udp_channels.insert (nano::transport::map_endpoint_to_v6 (endpoint_a), nano::protocol_version);
	}
	std::unique_lock<std::mutex> lock (mutex);
	if (stopped)
	{
		return;
	}
	// Cancel any running attempt and wait for the worker loop to release it
	while (attempt != nullptr)
	{
		attempt->stop ();
		condition.wait (lock);
	}
	node.stats.inc (nano::stat::type::bootstrap, nano::stat::detail::initiate, nano::stat::dir::out);
	attempt = std::make_shared<nano::bootstrap_attempt> (node.shared ());
	attempt->add_connection (endpoint_a);
	condition.notify_all ();
}
// Start (or join) a lazy bootstrap rooted at hash_a. With force set, any
// running attempt is stopped first and we wait for the worker to drop it.
void nano::bootstrap_initiator::bootstrap_lazy (nano::block_hash const & hash_a, bool force)
{
	{
		std::unique_lock<std::mutex> lock (mutex);
		if (force)
		{
			// Loop: the worker clears 'attempt' and notifies when the run finishes
			while (attempt != nullptr)
			{
				attempt->stop ();
				condition.wait (lock);
			}
		}
		node.stats.inc (nano::stat::type::bootstrap, nano::stat::detail::initiate_lazy, nano::stat::dir::out);
		if (attempt == nullptr)
		{
			attempt = std::make_shared<nano::bootstrap_attempt> (node.shared (), nano::bootstrap_mode::lazy);
		}
		// Existing lazy attempts simply gain an extra start key
		attempt->lazy_start (hash_a);
	}
	condition.notify_all ();
}
void nano::bootstrap_initiator::bootstrap_wallet (std::deque<nano::account> & accounts_a)
{
	// Start (or join) a wallet lazy bootstrap seeded with the given accounts
	{
		std::lock_guard<std::mutex> lock (mutex);
		node.stats.inc (nano::stat::type::bootstrap, nano::stat::detail::initiate_wallet_lazy, nano::stat::dir::out);
		if (attempt == nullptr)
		{
			attempt = std::make_shared<nano::bootstrap_attempt> (node.shared (), nano::bootstrap_mode::wallet_lazy);
		}
		// Hands the accounts over to the attempt (the caller's deque is swapped out)
		attempt->wallet_start (accounts_a);
	}
	condition.notify_all ();
}
void nano::bootstrap_initiator::run_bootstrap ()
{
	// Worker loop: run whichever attempt is installed, then clear it and
	// notify waiters (bootstrap_lazy / bootstrap (endpoint) wait on this).
	std::unique_lock<std::mutex> lock (mutex);
	while (!stopped)
	{
		if (attempt == nullptr)
		{
			condition.wait (lock);
			continue;
		}
		// The attempt's run loop manages its own locking
		lock.unlock ();
		switch (attempt->mode)
		{
			case nano::bootstrap_mode::legacy:
				attempt->run ();
				break;
			case nano::bootstrap_mode::lazy:
				attempt->lazy_run ();
				break;
			default:
				attempt->wallet_run ();
				break;
		}
		lock.lock ();
		attempt = nullptr;
		condition.notify_all ();
	}
}
void nano::bootstrap_initiator::add_observer (std::function<void(bool)> const & observer_a)
{
	// Registered observers are invoked by notify_listeners () with the in-progress flag
	std::lock_guard<std::mutex> lock (observers_mutex);
	observers.push_back (observer_a);
}
bool nano::bootstrap_initiator::in_progress ()
{
	// True while any bootstrap attempt is currently installed
	return current_attempt () != nullptr;
}
std::shared_ptr<nano::bootstrap_attempt> nano::bootstrap_initiator::current_attempt ()
{
	// Returns the active attempt (possibly nullptr); the shared_ptr keeps it
	// alive for the caller even if the worker clears it concurrently
	std::lock_guard<std::mutex> lock (mutex);
	return attempt;
}
void nano::bootstrap_initiator::stop ()
{
	// Idempotent shutdown: exchange () guarantees only the first caller
	// stops the attempt and joins the worker thread
	if (!stopped.exchange (true))
	{
		{
			std::lock_guard<std::mutex> guard (mutex);
			if (attempt != nullptr)
			{
				attempt->stop ();
			}
		}
		// Wake run_bootstrap () so it can observe 'stopped' and exit
		condition.notify_all ();
		if (thread.joinable ())
		{
			thread.join ();
		}
	}
}
void nano::bootstrap_initiator::notify_listeners (bool in_progress_a)
{
std::lock_guard<std::mutex> lock (observers_mutex);
for (auto & i : observers)
{
i (in_progress_a);
}
}
namespace nano
{
// Diagnostics: report observer and pulls-cache container sizes for memory accounting
std::unique_ptr<seq_con_info_component> collect_seq_con_info (bootstrap_initiator & bootstrap_initiator, const std::string & name)
{
	size_t count = 0;
	size_t cache_count = 0;
	// Snapshot each container size under its own lock
	{
		std::lock_guard<std::mutex> guard (bootstrap_initiator.observers_mutex);
		count = bootstrap_initiator.observers.size ();
	}
	{
		std::lock_guard<std::mutex> guard (bootstrap_initiator.cache.pulls_cache_mutex);
		cache_count = bootstrap_initiator.cache.cache.size ();
	}
	auto sizeof_element = sizeof (decltype (bootstrap_initiator.observers)::value_type);
	auto sizeof_cache_element = sizeof (decltype (bootstrap_initiator.cache.cache)::value_type);
	auto composite = std::make_unique<seq_con_info_composite> (name);
	composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "observers", count, sizeof_element }));
	composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "pulls_cache", cache_count, sizeof_cache_element }));
	return composite;
}
}
// Listener for incoming TCP/bootstrap connections; listening does not begin
// until start () is called.
nano::bootstrap_listener::bootstrap_listener (uint16_t port_a, nano::node & node_a) :
node (node_a),
port (port_a)
{
}
// Bind the server socket on all IPv6 interfaces and begin accepting
// connections. Throws std::runtime_error if the bind/listen fails.
void nano::bootstrap_listener::start ()
{
	listening_socket = std::make_shared<nano::server_socket> (node.shared (), boost::asio::ip::tcp::endpoint (boost::asio::ip::address_v6::any (), port), node.config.tcp_incoming_connections_max);
	boost::system::error_code ec;
	listening_socket->start (ec);
	if (ec)
	{
		node.logger.try_log (boost::str (boost::format ("Error while binding for incoming TCP/bootstrap on port %1%: %2%") % listening_socket->listening_port () % ec.message ()));
		throw std::runtime_error (ec.message ());
	}
	// Returning false from the handler stops further accepts
	listening_socket->on_connection ([this](std::shared_ptr<nano::socket> new_connection, boost::system::error_code const & ec_a) {
		bool keep_accepting = true;
		if (ec_a)
		{
			keep_accepting = false;
			this->node.logger.try_log (boost::str (boost::format ("Error while accepting incoming TCP/bootstrap connections: %1%") % ec_a.message ()));
		}
		else
		{
			accept_action (ec_a, new_connection);
		}
		return keep_accepting;
	});
}
void nano::bootstrap_listener::stop ()
{
	decltype (connections) connections_l;
	{
		// Detach all current connections while holding the lock...
		std::lock_guard<std::mutex> lock (mutex);
		on = false;
		connections_l.swap (connections);
	}
	// ...so their teardown happens outside it when connections_l leaves scope
	if (listening_socket != nullptr)
	{
		listening_socket->close ();
		listening_socket = nullptr;
	}
}
size_t nano::bootstrap_listener::connection_count ()
{
	// Number of live incoming TCP/bootstrap server connections
	std::lock_guard<std::mutex> lock (mutex);
	return connections.size ();
}
void nano::bootstrap_listener::accept_action (boost::system::error_code const & ec, std::shared_ptr<nano::socket> socket_a)
{
	// Wrap the accepted socket in a bootstrap server, register it, and start reading
	auto connection (std::make_shared<nano::bootstrap_server> (socket_a, node.shared ()));
	std::lock_guard<std::mutex> lock (mutex);
	connections[connection.get ()] = connection;
	connection->receive ();
}
// Local loopback endpoint of the listener.
// Fix: stop () resets listening_socket to nullptr, so dereferencing it
// unconditionally was undefined behavior after shutdown; report port 0 instead.
boost::asio::ip::tcp::endpoint nano::bootstrap_listener::endpoint ()
{
	if (listening_socket == nullptr)
	{
		return boost::asio::ip::tcp::endpoint (boost::asio::ip::address_v6::loopback (), 0);
	}
	return boost::asio::ip::tcp::endpoint (boost::asio::ip::address_v6::loopback (), listening_socket->listening_port ());
}
namespace nano
{
// Diagnostics: report the number of active bootstrap server connections
std::unique_ptr<seq_con_info_component> collect_seq_con_info (bootstrap_listener & bootstrap_listener, const std::string & name)
{
	auto sizeof_element = sizeof (decltype (bootstrap_listener.connections)::value_type);
	auto composite = std::make_unique<seq_con_info_composite> (name);
	composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "connections", bootstrap_listener.connection_count (), sizeof_element }));
	return composite;
}
}
// Tears down a server connection: adjusts the per-type counters, removes
// realtime channel bookkeeping, closes the socket and unregisters from the
// listener's connection map.
nano::bootstrap_server::~bootstrap_server ()
{
	if (node->config.logging.bulk_pull_logging ())
	{
		node->logger.try_log ("Exiting incoming TCP/bootstrap server");
	}
	// Decrement the counter matching the type this server was promoted to
	if (type == nano::bootstrap_server_type::bootstrap)
	{
		--node->bootstrap.bootstrap_count;
	}
	else if (type == nano::bootstrap_server_type::realtime)
	{
		--node->bootstrap.realtime_count;
		node->network.response_channels.remove (remote_endpoint);
		// Clear temporary channel
		auto exisiting_response_channel (node->network.tcp_channels.find_channel (remote_endpoint));
		if (exisiting_response_channel != nullptr)
		{
			exisiting_response_channel->server = false;
			node->network.tcp_channels.erase (remote_endpoint);
		}
	}
	stop ();
	// Unregister from the listener's connection map
	std::lock_guard<std::mutex> lock (node->bootstrap.mutex);
	node->bootstrap.connections.erase (this);
}
void nano::bootstrap_server::stop ()
{
	// Idempotent: only the first caller proceeds to close the socket
	if (stopped.exchange (true))
	{
		return;
	}
	if (socket != nullptr)
	{
		socket->close ();
	}
}
// One server-side connection handling bootstrap and realtime messages.
nano::bootstrap_server::bootstrap_server (std::shared_ptr<nano::socket> socket_a, std::shared_ptr<nano::node> node_a) :
receive_buffer (std::make_shared<std::vector<uint8_t>> ()),
socket (socket_a),
node (node_a)
{
	// NOTE(review): presumably 512 bytes covers the largest header+payload read
	// performed by this server — confirm against message size limits
	receive_buffer->resize (512);
}
// Read the next 8-byte message header from the socket and dispatch it.
void nano::bootstrap_server::receive ()
{
	// Increase timeout to receive TCP header (idle server socket)
	socket->set_timeout (node->network_params.node.idle_timeout);
	auto this_l (shared_from_this ());
	socket->async_read (receive_buffer, 8, [this_l](boost::system::error_code const & ec, size_t size_a) {
		// Set remote_endpoint
		if (this_l->remote_endpoint.port () == 0)
		{
			this_l->remote_endpoint = this_l->socket->remote_endpoint ();
		}
		// Decrease timeout to default
		this_l->socket->set_timeout (this_l->node->config.tcp_io_timeout);
		// Receive header
		this_l->receive_header_action (ec, size_a);
	});
}
// Parse the 8-byte message header just read and dispatch to the handler for
// its message type. The six payload-carrying cases previously repeated the
// same async_read + handler boilerplate; it is factored into one local lambda.
void nano::bootstrap_server::receive_header_action (boost::system::error_code const & ec, size_t size_a)
{
	if (!ec)
	{
		assert (size_a == 8);
		nano::bufferstream type_stream (receive_buffer->data (), size_a);
		auto error (false);
		nano::message_header header (error, type_stream);
		if (!error)
		{
			// Asynchronously read the payload for 'header', then invoke the
			// given type-specific member handler with (ec, size, header).
			auto receive_payload = [this, &header](void (nano::bootstrap_server::*action_a) (boost::system::error_code const &, size_t, nano::message_header const &)) {
				auto this_l (shared_from_this ());
				socket->async_read (receive_buffer, header.payload_length_bytes (), [this_l, header, action_a](boost::system::error_code const & ec, size_t size_a) {
					((*this_l).*action_a) (ec, size_a, header);
				});
			};
			switch (header.type)
			{
				case nano::message_type::bulk_pull:
				{
					node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::bulk_pull, nano::stat::dir::in);
					receive_payload (&nano::bootstrap_server::receive_bulk_pull_action);
					break;
				}
				case nano::message_type::bulk_pull_account:
				{
					node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::bulk_pull_account, nano::stat::dir::in);
					receive_payload (&nano::bootstrap_server::receive_bulk_pull_account_action);
					break;
				}
				case nano::message_type::frontier_req:
				{
					node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::frontier_req, nano::stat::dir::in);
					receive_payload (&nano::bootstrap_server::receive_frontier_req_action);
					break;
				}
				case nano::message_type::bulk_push:
				{
					// bulk_push has no payload to pre-read; the block stream follows directly
					node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::bulk_push, nano::stat::dir::in);
					if (is_bootstrap_connection ())
					{
						add_request (std::unique_ptr<nano::message> (new nano::bulk_push (header)));
					}
					break;
				}
				case nano::message_type::keepalive:
				{
					receive_payload (&nano::bootstrap_server::receive_keepalive_action);
					break;
				}
				case nano::message_type::publish:
				{
					receive_payload (&nano::bootstrap_server::receive_publish_action);
					break;
				}
				case nano::message_type::confirm_ack:
				{
					receive_payload (&nano::bootstrap_server::receive_confirm_ack_action);
					break;
				}
				case nano::message_type::confirm_req:
				{
					receive_payload (&nano::bootstrap_server::receive_confirm_req_action);
					break;
				}
				case nano::message_type::node_id_handshake:
				{
					receive_payload (&nano::bootstrap_server::receive_node_id_handshake_action);
					break;
				}
				default:
				{
					if (node->config.logging.network_logging ())
					{
						node->logger.try_log (boost::str (boost::format ("Received invalid type from bootstrap connection %1%") % static_cast<uint8_t> (header.type)));
					}
					break;
				}
			}
		}
	}
	else
	{
		if (node->config.logging.bulk_pull_logging ())
		{
			node->logger.try_log (boost::str (boost::format ("Error while receiving type: %1%") % ec.message ()));
		}
	}
}
void nano::bootstrap_server::receive_bulk_pull_action (boost::system::error_code const & ec, size_t size_a, nano::message_header const & header_a)
{
	// Deserialize a bulk_pull payload and queue it for processing
	if (ec)
	{
		return;
	}
	auto error (false);
	nano::bufferstream stream (receive_buffer->data (), size_a);
	std::unique_ptr<nano::bulk_pull> request (new nano::bulk_pull (error, stream, header_a));
	if (error)
	{
		return;
	}
	if (node->config.logging.bulk_pull_logging ())
	{
		node->logger.try_log (boost::str (boost::format ("Received bulk pull for %1% down to %2%, maximum of %3%") % request->start.to_string () % request->end.to_string () % (request->count ? request->count : std::numeric_limits<double>::infinity ())));
	}
	if (is_bootstrap_connection ())
	{
		add_request (std::unique_ptr<nano::message> (request.release ()));
	}
	// Continue reading the next message header
	receive ();
}
void nano::bootstrap_server::receive_bulk_pull_account_action (boost::system::error_code const & ec, size_t size_a, nano::message_header const & header_a)
{
	// Deserialize a bulk_pull_account payload and queue it for processing
	if (ec)
	{
		return;
	}
	auto error (false);
	assert (size_a == header_a.payload_length_bytes ());
	nano::bufferstream stream (receive_buffer->data (), size_a);
	std::unique_ptr<nano::bulk_pull_account> request (new nano::bulk_pull_account (error, stream, header_a));
	if (error)
	{
		return;
	}
	if (node->config.logging.bulk_pull_logging ())
	{
		node->logger.try_log (boost::str (boost::format ("Received bulk pull account for %1% with a minimum amount of %2%") % request->account.to_account () % nano::amount (request->minimum_amount).format_balance (nano::Mxrb_ratio, 10, true)));
	}
	if (is_bootstrap_connection ())
	{
		add_request (std::unique_ptr<nano::message> (request.release ()));
	}
	// Continue reading the next message header
	receive ();
}
// Deserialize a frontier_req payload and queue it for processing.
// Fix: the error log previously read "Error sending receiving frontier
// request" — garbled wording for what is a receive failure.
void nano::bootstrap_server::receive_frontier_req_action (boost::system::error_code const & ec, size_t size_a, nano::message_header const & header_a)
{
	if (!ec)
	{
		auto error (false);
		nano::bufferstream stream (receive_buffer->data (), size_a);
		std::unique_ptr<nano::frontier_req> request (new nano::frontier_req (error, stream, header_a));
		if (!error)
		{
			if (node->config.logging.bulk_pull_logging ())
			{
				node->logger.try_log (boost::str (boost::format ("Received frontier request for %1% with age %2%") % request->start.to_string () % request->age));
			}
			if (is_bootstrap_connection ())
			{
				add_request (std::unique_ptr<nano::message> (request.release ()));
			}
			// Continue reading the next message header
			receive ();
		}
	}
	else
	{
		if (node->config.logging.network_logging ())
		{
			node->logger.try_log (boost::str (boost::format ("Error receiving frontier request: %1%") % ec.message ()));
		}
	}
}
void nano::bootstrap_server::receive_keepalive_action (boost::system::error_code const & ec, size_t size_a, nano::message_header const & header_a)
{
	// Deserialize a keepalive; only realtime connections act on it
	if (ec)
	{
		if (node->config.logging.network_keepalive_logging ())
		{
			node->logger.try_log (boost::str (boost::format ("Error receiving keepalive: %1%") % ec.message ()));
		}
		return;
	}
	auto error (false);
	nano::bufferstream stream (receive_buffer->data (), size_a);
	std::unique_ptr<nano::keepalive> request (new nano::keepalive (error, stream, header_a));
	if (!error)
	{
		if (type == nano::bootstrap_server_type::realtime || type == nano::bootstrap_server_type::realtime_response_server)
		{
			add_request (std::unique_ptr<nano::message> (request.release ()));
		}
		receive ();
	}
}
void nano::bootstrap_server::receive_publish_action (boost::system::error_code const & ec, size_t size_a, nano::message_header const & header_a)
{
	// Deserialize a publish message; only realtime connections act on it
	if (ec)
	{
		if (node->config.logging.network_message_logging ())
		{
			node->logger.try_log (boost::str (boost::format ("Error receiving publish: %1%") % ec.message ()));
		}
		return;
	}
	auto error (false);
	nano::bufferstream stream (receive_buffer->data (), size_a);
	std::unique_ptr<nano::publish> request (new nano::publish (error, stream, header_a));
	if (!error)
	{
		if (type == nano::bootstrap_server_type::realtime || type == nano::bootstrap_server_type::realtime_response_server)
		{
			add_request (std::unique_ptr<nano::message> (request.release ()));
		}
		receive ();
	}
}
void nano::bootstrap_server::receive_confirm_req_action (boost::system::error_code const & ec, size_t size_a, nano::message_header const & header_a)
{
	// Deserialize a confirm_req; only realtime connections act on it
	if (ec)
	{
		if (node->config.logging.network_message_logging ())
		{
			node->logger.try_log (boost::str (boost::format ("Error receiving confirm_req: %1%") % ec.message ()));
		}
		return;
	}
	auto error (false);
	nano::bufferstream stream (receive_buffer->data (), size_a);
	std::unique_ptr<nano::confirm_req> request (new nano::confirm_req (error, stream, header_a));
	if (!error)
	{
		if (type == nano::bootstrap_server_type::realtime || type == nano::bootstrap_server_type::realtime_response_server)
		{
			add_request (std::unique_ptr<nano::message> (request.release ()));
		}
		receive ();
	}
}
void nano::bootstrap_server::receive_confirm_ack_action (boost::system::error_code const & ec, size_t size_a, nano::message_header const & header_a)
{
	// Deserialize a confirm_ack; only realtime connections act on it
	if (ec)
	{
		if (node->config.logging.network_message_logging ())
		{
			node->logger.try_log (boost::str (boost::format ("Error receiving confirm_ack: %1%") % ec.message ()));
		}
		return;
	}
	auto error (false);
	nano::bufferstream stream (receive_buffer->data (), size_a);
	std::unique_ptr<nano::confirm_ack> request (new nano::confirm_ack (error, stream, header_a));
	if (!error)
	{
		if (type == nano::bootstrap_server_type::realtime || type == nano::bootstrap_server_type::realtime_response_server)
		{
			add_request (std::unique_ptr<nano::message> (request.release ()));
		}
		receive ();
	}
}
void nano::bootstrap_server::receive_node_id_handshake_action (boost::system::error_code const & ec, size_t size_a, nano::message_header const & header_a)
{
	// Deserialize a node_id_handshake; only still-undefined connections may
	// be promoted, and only when TCP realtime is enabled
	if (ec)
	{
		if (node->config.logging.network_node_id_handshake_logging ())
		{
			node->logger.try_log (boost::str (boost::format ("Error receiving node_id_handshake: %1%") % ec.message ()));
		}
		return;
	}
	auto error (false);
	nano::bufferstream stream (receive_buffer->data (), size_a);
	std::unique_ptr<nano::node_id_handshake> request (new nano::node_id_handshake (error, stream, header_a));
	if (!error)
	{
		if (type == nano::bootstrap_server_type::undefined && !node->flags.disable_tcp_realtime)
		{
			add_request (std::unique_ptr<nano::message> (request.release ()));
		}
		receive ();
	}
}
void nano::bootstrap_server::add_request (std::unique_ptr<nano::message> message_a)
{
	assert (message_a != nullptr);
	std::lock_guard<std::mutex> lock (mutex);
	// Kick off processing only when the queue transitions from empty;
	// otherwise finish_request () chains on to the next entry
	bool was_empty (requests.empty ());
	requests.push (std::move (message_a));
	if (was_empty)
	{
		run_next ();
	}
}
// Pop the request just completed and start the next one; when the queue is
// empty, schedule a timeout check so an idle connection eventually closes.
void nano::bootstrap_server::finish_request ()
{
	std::lock_guard<std::mutex> lock (mutex);
	requests.pop ();
	if (!requests.empty ())
	{
		run_next ();
	}
	else
	{
		// A weak pointer lets the server be destroyed before the alarm fires
		std::weak_ptr<nano::bootstrap_server> this_w (shared_from_this ());
		node->alarm.add (std::chrono::steady_clock::now () + (node->config.tcp_io_timeout * 2) + std::chrono::seconds (1), [this_w]() {
			if (auto this_l = this_w.lock ())
			{
				this_l->timeout ();
			}
		});
	}
}
void nano::bootstrap_server::finish_request_async ()
{
	// Defer finish_request () to a background dispatch to avoid re-entrancy;
	// the weak pointer simply skips the call if the server is already gone
	std::weak_ptr<nano::bootstrap_server> this_w (shared_from_this ());
	node->background ([this_w]() {
		auto this_l (this_w.lock ());
		if (this_l != nullptr)
		{
			this_l->finish_request ();
		}
	});
}
void nano::bootstrap_server::timeout ()
{
	// Called from the idle alarm set by finish_request ()
	if (socket == nullptr)
	{
		// No socket left: just drop the bookkeeping entry
		std::lock_guard<std::mutex> lock (node->bootstrap.mutex);
		node->bootstrap.connections.erase (this);
		return;
	}
	if (!socket->has_timed_out ())
	{
		return;
	}
	if (node->config.logging.bulk_pull_logging ())
	{
		node->logger.try_log ("Closing incoming tcp / bootstrap server by timeout");
	}
	{
		std::lock_guard<std::mutex> lock (node->bootstrap.mutex);
		node->bootstrap.connections.erase (this);
	}
	socket->close ();
}
namespace
{
class request_response_visitor : public nano::message_visitor
{
public:
request_response_visitor (std::shared_ptr<nano::bootstrap_server> connection_a) :
connection (connection_a)
{
}
virtual ~request_response_visitor () = default;
void keepalive (nano::keepalive const & message_a) override
{
bool first_keepalive (connection->keepalive_first);
if (first_keepalive)
{
connection->keepalive_first = false;
}
connection->finish_request_async ();
auto connection_l (connection->shared_from_this ());
connection->node->background ([connection_l, message_a, first_keepalive]() {
connection_l->node->network.tcp_channels.process_keepalive (message_a, connection_l->remote_endpoint, first_keepalive);
});
}
void publish (nano::publish const & message_a) override
{
connection->finish_request_async ();
auto connection_l (connection->shared_from_this ());
connection->node->background ([connection_l, message_a]() {
connection_l->node->network.tcp_channels.process_message (message_a, connection_l->remote_endpoint, connection_l->remote_node_id, connection_l->socket, connection_l->type);
});
}
void confirm_req (nano::confirm_req const & message_a) override
{
connection->finish_request_async ();
auto connection_l (connection->shared_from_this ());
connection->node->background ([connection_l, message_a]() {
connection_l->node->network.tcp_channels.process_message (message_a, connection_l->remote_endpoint, connection_l->remote_node_id, connection_l->socket, connection_l->type);
});
}
void confirm_ack (nano::confirm_ack const & message_a) override
{
connection->finish_request_async ();
auto connection_l (connection->shared_from_this ());
connection->node->background ([connection_l, message_a]() {
connection_l->node->network.tcp_channels.process_message (message_a, connection_l->remote_endpoint, connection_l->remote_node_id, connection_l->socket, connection_l->type);
});
}
void bulk_pull (nano::bulk_pull const &) override
{
auto response (std::make_shared<nano::bulk_pull_server> (connection, std::unique_ptr<nano::bulk_pull> (static_cast<nano::bulk_pull *> (connection->requests.front ().release ()))));
response->send_next ();
}
void bulk_pull_account (nano::bulk_pull_account const &) override
{
auto response (std::make_shared<nano::bulk_pull_account_server> (connection, std::unique_ptr<nano::bulk_pull_account> (static_cast<nano::bulk_pull_account *> (connection->requests.front ().release ()))));
response->send_frontier ();
}
void bulk_push (nano::bulk_push const &) override
{
auto response (std::make_shared<nano::bulk_push_server> (connection));
response->receive ();
}
void frontier_req (nano::frontier_req const &) override
{
auto response (std::make_shared<nano::frontier_req_server> (connection, std::unique_ptr<nano::frontier_req> (static_cast<nano::frontier_req *> (connection->requests.front ().release ()))));
response->send_next ();
}
void node_id_handshake (nano::node_id_handshake const & message_a) override
{
if (connection->node->config.logging.network_node_id_handshake_logging ())
{
connection->node->logger.try_log (boost::str (boost::format ("Received node_id_handshake message from %1%") % connection->remote_endpoint));
}
if (message_a.query)
{
boost::optional<std::pair<nano::account, nano::signature>> response (std::make_pair (connection->node->node_id.pub, nano::sign_message (connection->node->node_id.prv, connection->node->node_id.pub, *message_a.query)));
assert (!nano::validate_message (response->first, *message_a.query, response->second));
auto cookie (connection->node->network.syn_cookies.assign (nano::transport::map_tcp_to_endpoint (connection->remote_endpoint)));
nano::node_id_handshake response_message (cookie, response);
auto bytes = response_message.to_bytes ();
// clang-format off
connection->socket->async_write (bytes, [ bytes, connection = connection ](boost::system::error_code const & ec, size_t size_a) {
if (ec)
{
if (connection->node->config.logging.network_node_id_handshake_logging ())
{
connection->node->logger.try_log (boost::str (boost::format ("Error sending node_id_handshake to %1%: %2%") % connection->remote_endpoint % ec.message ()));
}
// Stop invalid handshake
connection->stop ();
}
else
{
connection->node->stats.inc (nano::stat::type::message, nano::stat::detail::node_id_handshake, nano::stat::dir::out);
connection->finish_request ();
}
});
// clang-format on
}
else if (message_a.response)
{
nano::account const & node_id (message_a.response->first);
if (!connection->node->network.syn_cookies.validate (nano::transport::map_tcp_to_endpoint (connection->remote_endpoint), node_id, message_a.response->second) && node_id != connection->node->node_id.pub)
{
connection->remote_node_id = node_id;
connection->type = nano::bootstrap_server_type::realtime;
++connection->node->bootstrap.realtime_count;
connection->finish_request_async ();
}
else
{
// Stop invalid handshake
connection->stop ();
}
}
else
{
connection->finish_request_async ();
}
nano::account node_id (connection->remote_node_id);
nano::bootstrap_server_type type (connection->type);
assert (node_id.is_zero () || type == nano::bootstrap_server_type::realtime);
auto connection_l (connection->shared_from_this ());
connection->node->background ([connection_l, message_a, node_id, type]() {
connection_l->node->network.tcp_channels.process_message (message_a, connection_l->remote_endpoint, node_id, connection_l->socket, type);
});
}
std::shared_ptr<nano::bootstrap_server> connection;
};
}
void nano::bootstrap_server::run_next ()
{
assert (!requests.empty ());
request_response_visitor visitor (shared_from_this ());
requests.front ()->visit (visitor);
}
bool nano::bootstrap_server::is_bootstrap_connection ()
{
if (type == nano::bootstrap_server_type::undefined && !node->flags.disable_bootstrap_listener && node->bootstrap.bootstrap_count < node->config.bootstrap_connections_max)
{
++node->bootstrap.bootstrap_count;
type = nano::bootstrap_server_type::bootstrap;
}
return type == nano::bootstrap_server_type::bootstrap;
}
/**
* Handle a request for the pull of all blocks associated with an account
* The account is supplied as the "start" member, and the final block to
* send is the "end" member. The "start" member may also be a block
* hash, in which case the that hash is used as the start of a chain
* to send. To determine if "start" is interpretted as an account or
* hash, the ledger is checked to see if the block specified exists,
* if not then it is interpretted as an account.
*
* Additionally, if "start" is specified as a block hash the range
* is inclusive of that block hash, that is the range will be:
* [start, end); In the case that a block hash is not specified the
* range will be exclusive of the frontier for that account with
* a range of (frontier, end)
*/
void nano::bulk_pull_server::set_current_end ()
{
include_start = false;
assert (request != nullptr);
auto transaction (connection->node->store.tx_begin_read ());
if (!connection->node->store.block_exists (transaction, request->end))
{
if (connection->node->config.logging.bulk_pull_logging ())
{
connection->node->logger.try_log (boost::str (boost::format ("Bulk pull end block doesn't exist: %1%, sending everything") % request->end.to_string ()));
}
request->end.clear ();
}
if (connection->node->store.block_exists (transaction, request->start))
{
if (connection->node->config.logging.bulk_pull_logging ())
{
connection->node->logger.try_log (boost::str (boost::format ("Bulk pull request for block hash: %1%") % request->start.to_string ()));
}
current = request->start;
include_start = true;
}
else
{
nano::account_info info;
auto no_address (connection->node->store.account_get (transaction, request->start, info));
if (no_address)
{
if (connection->node->config.logging.bulk_pull_logging ())
{
connection->node->logger.try_log (boost::str (boost::format ("Request for unknown account: %1%") % request->start.to_account ()));
}
current = request->end;
}
else
{
current = info.head;
if (!request->end.is_zero ())
{
auto account (connection->node->ledger.account (transaction, request->end));
if (account != request->start)
{
if (connection->node->config.logging.bulk_pull_logging ())
{
connection->node->logger.try_log (boost::str (boost::format ("Request for block that is not on account chain: %1% not on %2%") % request->end.to_string () % request->start.to_account ()));
}
current = request->end;
}
}
}
}
sent_count = 0;
if (request->is_count_present ())
{
max_count = request->count;
}
else
{
max_count = 0;
}
}
void nano::bulk_pull_server::send_next ()
{
auto block (get_next ());
if (block != nullptr)
{
{
send_buffer->clear ();
nano::vectorstream stream (*send_buffer);
nano::serialize_block (stream, *block);
}
auto this_l (shared_from_this ());
if (connection->node->config.logging.bulk_pull_logging ())
{
connection->node->logger.try_log (boost::str (boost::format ("Sending block: %1%") % block->hash ().to_string ()));
}
connection->socket->async_write (send_buffer, [this_l](boost::system::error_code const & ec, size_t size_a) {
this_l->sent_action (ec, size_a);
});
}
else
{
send_finished ();
}
}
std::shared_ptr<nano::block> nano::bulk_pull_server::get_next ()
{
std::shared_ptr<nano::block> result;
bool send_current = false, set_current_to_end = false;
/*
* Determine if we should reply with a block
*
* If our cursor is on the final block, we should signal that we
* are done by returning a null result.
*
* Unless we are including the "start" member and this is the
* start member, then include it anyway.
*/
if (current != request->end)
{
send_current = true;
}
else if (current == request->end && include_start == true)
{
send_current = true;
/*
* We also need to ensure that the next time
* are invoked that we return a null result
*/
set_current_to_end = true;
}
/*
* Account for how many blocks we have provided. If this
* exceeds the requested maximum, return an empty object
* to signal the end of results
*/
if (max_count != 0 && sent_count >= max_count)
{
send_current = false;
}
if (send_current)
{
auto transaction (connection->node->store.tx_begin_read ());
result = connection->node->store.block_get (transaction, current);
if (result != nullptr && set_current_to_end == false)
{
auto previous (result->previous ());
if (!previous.is_zero ())
{
current = previous;
}
else
{
current = request->end;
}
}
else
{
current = request->end;
}
sent_count++;
}
/*
* Once we have processed "get_next()" once our cursor is no longer on
* the "start" member, so this flag is not relevant is always false.
*/
include_start = false;
return result;
}
void nano::bulk_pull_server::sent_action (boost::system::error_code const & ec, size_t size_a)
{
if (!ec)
{
send_next ();
}
else
{
if (connection->node->config.logging.bulk_pull_logging ())
{
connection->node->logger.try_log (boost::str (boost::format ("Unable to bulk send block: %1%") % ec.message ()));
}
}
}
void nano::bulk_pull_server::send_finished ()
{
send_buffer->clear ();
send_buffer->push_back (static_cast<uint8_t> (nano::block_type::not_a_block));
auto this_l (shared_from_this ());
if (connection->node->config.logging.bulk_pull_logging ())
{
connection->node->logger.try_log ("Bulk sending finished");
}
connection->socket->async_write (send_buffer, [this_l](boost::system::error_code const & ec, size_t size_a) {
this_l->no_block_sent (ec, size_a);
});
}
void nano::bulk_pull_server::no_block_sent (boost::system::error_code const & ec, size_t size_a)
{
if (!ec)
{
assert (size_a == 1);
connection->finish_request ();
}
else
{
if (connection->node->config.logging.bulk_pull_logging ())
{
connection->node->logger.try_log ("Unable to send not-a-block");
}
}
}
nano::bulk_pull_server::bulk_pull_server (std::shared_ptr<nano::bootstrap_server> const & connection_a, std::unique_ptr<nano::bulk_pull> request_a) :
connection (connection_a),
request (std::move (request_a)),
send_buffer (std::make_shared<std::vector<uint8_t>> ())
{
set_current_end ();
}
/**
* Bulk pull blocks related to an account
*/
void nano::bulk_pull_account_server::set_params ()
{
assert (request != nullptr);
/*
* Parse the flags
*/
invalid_request = false;
pending_include_address = false;
pending_address_only = false;
if (request->flags == nano::bulk_pull_account_flags::pending_address_only)
{
pending_address_only = true;
}
else if (request->flags == nano::bulk_pull_account_flags::pending_hash_amount_and_address)
{
/**
** This is the same as "pending_hash_and_amount" but with the
** sending address appended, for UI purposes mainly.
**/
pending_include_address = true;
}
else if (request->flags == nano::bulk_pull_account_flags::pending_hash_and_amount)
{
/** The defaults are set above **/
}
else
{
if (connection->node->config.logging.bulk_pull_logging ())
{
connection->node->logger.try_log (boost::str (boost::format ("Invalid bulk_pull_account flags supplied %1%") % static_cast<uint8_t> (request->flags)));
}
invalid_request = true;
return;
}
/*
* Initialize the current item from the requested account
*/
current_key.account = request->account;
current_key.hash = 0;
}
void nano::bulk_pull_account_server::send_frontier ()
{
/*
* This function is really the entry point into this class,
* so handle the invalid_request case by terminating the
* request without any response
*/
if (!invalid_request)
{
auto stream_transaction (connection->node->store.tx_begin_read ());
// Get account balance and frontier block hash
auto account_frontier_hash (connection->node->ledger.latest (stream_transaction, request->account));
auto account_frontier_balance_int (connection->node->ledger.account_balance (stream_transaction, request->account));
nano::uint128_union account_frontier_balance (account_frontier_balance_int);
// Write the frontier block hash and balance into a buffer
send_buffer->clear ();
{
nano::vectorstream output_stream (*send_buffer);
write (output_stream, account_frontier_hash.bytes);
write (output_stream, account_frontier_balance.bytes);
}
// Send the buffer to the requestor
auto this_l (shared_from_this ());
connection->socket->async_write (send_buffer, [this_l](boost::system::error_code const & ec, size_t size_a) {
this_l->sent_action (ec, size_a);
});
}
}
void nano::bulk_pull_account_server::send_next_block ()
{
/*
* Get the next item from the queue, it is a tuple with the key (which
* contains the account and hash) and data (which contains the amount)
*/
auto block_data (get_next ());
auto block_info_key (block_data.first.get ());
auto block_info (block_data.second.get ());
if (block_info_key != nullptr)
{
/*
* If we have a new item, emit it to the socket
*/
send_buffer->clear ();
if (pending_address_only)
{
nano::vectorstream output_stream (*send_buffer);
if (connection->node->config.logging.bulk_pull_logging ())
{
connection->node->logger.try_log (boost::str (boost::format ("Sending address: %1%") % block_info->source.to_string ()));
}
write (output_stream, block_info->source.bytes);
}
else
{
nano::vectorstream output_stream (*send_buffer);
if (connection->node->config.logging.bulk_pull_logging ())
{
connection->node->logger.try_log (boost::str (boost::format ("Sending block: %1%") % block_info_key->hash.to_string ()));
}
write (output_stream, block_info_key->hash.bytes);
write (output_stream, block_info->amount.bytes);
if (pending_include_address)
{
/**
** Write the source address as well, if requested
**/
write (output_stream, block_info->source.bytes);
}
}
auto this_l (shared_from_this ());
connection->socket->async_write (send_buffer, [this_l](boost::system::error_code const & ec, size_t size_a) {
this_l->sent_action (ec, size_a);
});
}
else
{
/*
* Otherwise, finalize the connection
*/
if (connection->node->config.logging.bulk_pull_logging ())
{
connection->node->logger.try_log (boost::str (boost::format ("Done sending blocks")));
}
send_finished ();
}
}
std::pair<std::unique_ptr<nano::pending_key>, std::unique_ptr<nano::pending_info>> nano::bulk_pull_account_server::get_next ()
{
std::pair<std::unique_ptr<nano::pending_key>, std::unique_ptr<nano::pending_info>> result;
while (true)
{
/*
* For each iteration of this loop, establish and then
* destroy a database transaction, to avoid locking the
* database for a prolonged period.
*/
auto stream_transaction (connection->node->store.tx_begin_read ());
auto stream (connection->node->store.pending_begin (stream_transaction, current_key));
if (stream == nano::store_iterator<nano::pending_key, nano::pending_info> (nullptr))
{
break;
}
nano::pending_key key (stream->first);
nano::pending_info info (stream->second);
/*
* Get the key for the next value, to use in the next call or iteration
*/
current_key.account = key.account;
current_key.hash = key.hash.number () + 1;
/*
* Finish up if the response is for a different account
*/
if (key.account != request->account)
{
break;
}
/*
* Skip entries where the amount is less than the requested
* minimum
*/
if (info.amount < request->minimum_amount)
{
continue;
}
/*
* If the pending_address_only flag is set, de-duplicate the
* responses. The responses are the address of the sender,
* so they are are part of the pending table's information
* and not key, so we have to de-duplicate them manually.
*/
if (pending_address_only)
{
if (!deduplication.insert (info.source).second)
{
/*
* If the deduplication map gets too
* large, clear it out. This may
* result in some duplicates getting
* sent to the client, but we do not
* want to commit too much memory
*/
if (deduplication.size () > 4096)
{
deduplication.clear ();
}
continue;
}
}
result.first = std::unique_ptr<nano::pending_key> (new nano::pending_key (key));
result.second = std::unique_ptr<nano::pending_info> (new nano::pending_info (info));
break;
}
return result;
}
void nano::bulk_pull_account_server::sent_action (boost::system::error_code const & ec, size_t size_a)
{
if (!ec)
{
send_next_block ();
}
else
{
if (connection->node->config.logging.bulk_pull_logging ())
{
connection->node->logger.try_log (boost::str (boost::format ("Unable to bulk send block: %1%") % ec.message ()));
}
}
}
void nano::bulk_pull_account_server::send_finished ()
{
/*
* The "bulk_pull_account" final sequence is a final block of all
* zeros. If we are sending only account public keys (with the
* "pending_address_only" flag) then it will be 256-bits of zeros,
* otherwise it will be either 384-bits of zeros (if the
* "pending_include_address" flag is not set) or 640-bits of zeros
* (if that flag is set).
*/
send_buffer->clear ();
{
nano::vectorstream output_stream (*send_buffer);
nano::uint256_union account_zero (0);
nano::uint128_union balance_zero (0);
write (output_stream, account_zero.bytes);
if (!pending_address_only)
{
write (output_stream, balance_zero.bytes);
if (pending_include_address)
{
write (output_stream, account_zero.bytes);
}
}
}
auto this_l (shared_from_this ());
if (connection->node->config.logging.bulk_pull_logging ())
{
connection->node->logger.try_log ("Bulk sending for an account finished");
}
connection->socket->async_write (send_buffer, [this_l](boost::system::error_code const & ec, size_t size_a) {
this_l->complete (ec, size_a);
});
}
void nano::bulk_pull_account_server::complete (boost::system::error_code const & ec, size_t size_a)
{
if (!ec)
{
if (pending_address_only)
{
assert (size_a == 32);
}
else
{
if (pending_include_address)
{
assert (size_a == 80);
}
else
{
assert (size_a == 48);
}
}
connection->finish_request ();
}
else
{
if (connection->node->config.logging.bulk_pull_logging ())
{
connection->node->logger.try_log ("Unable to pending-as-zero");
}
}
}
nano::bulk_pull_account_server::bulk_pull_account_server (std::shared_ptr<nano::bootstrap_server> const & connection_a, std::unique_ptr<nano::bulk_pull_account> request_a) :
connection (connection_a),
request (std::move (request_a)),
send_buffer (std::make_shared<std::vector<uint8_t>> ()),
current_key (0, 0)
{
/*
* Setup the streaming response for the first call to "send_frontier" and "send_next_block"
*/
set_params ();
}
nano::bulk_push_server::bulk_push_server (std::shared_ptr<nano::bootstrap_server> const & connection_a) :
receive_buffer (std::make_shared<std::vector<uint8_t>> ()),
connection (connection_a)
{
receive_buffer->resize (256);
}
void nano::bulk_push_server::receive ()
{
if (connection->node->bootstrap_initiator.in_progress ())
{
if (connection->node->config.logging.bulk_pull_logging ())
{
connection->node->logger.try_log ("Aborting bulk_push because a bootstrap attempt is in progress");
}
}
else
{
auto this_l (shared_from_this ());
connection->socket->async_read (receive_buffer, 1, [this_l](boost::system::error_code const & ec, size_t size_a) {
if (!ec)
{
this_l->received_type ();
}
else
{
if (this_l->connection->node->config.logging.bulk_pull_logging ())
{
this_l->connection->node->logger.try_log (boost::str (boost::format ("Error receiving block type: %1%") % ec.message ()));
}
}
});
}
}
void nano::bulk_push_server::received_type ()
{
auto this_l (shared_from_this ());
nano::block_type type (static_cast<nano::block_type> (receive_buffer->data ()[0]));
switch (type)
{
case nano::block_type::send:
{
connection->node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::send, nano::stat::dir::in);
connection->socket->async_read (receive_buffer, nano::send_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) {
this_l->received_block (ec, size_a, type);
});
break;
}
case nano::block_type::receive:
{
connection->node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::receive, nano::stat::dir::in);
connection->socket->async_read (receive_buffer, nano::receive_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) {
this_l->received_block (ec, size_a, type);
});
break;
}
case nano::block_type::open:
{
connection->node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::open, nano::stat::dir::in);
connection->socket->async_read (receive_buffer, nano::open_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) {
this_l->received_block (ec, size_a, type);
});
break;
}
case nano::block_type::change:
{
connection->node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::change, nano::stat::dir::in);
connection->socket->async_read (receive_buffer, nano::change_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) {
this_l->received_block (ec, size_a, type);
});
break;
}
case nano::block_type::state:
{
connection->node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::state_block, nano::stat::dir::in);
connection->socket->async_read (receive_buffer, nano::state_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) {
this_l->received_block (ec, size_a, type);
});
break;
}
case nano::block_type::not_a_block:
{
connection->finish_request ();
break;
}
default:
{
if (connection->node->config.logging.network_packet_logging ())
{
connection->node->logger.try_log ("Unknown type received as block type");
}
break;
}
}
}
void nano::bulk_push_server::received_block (boost::system::error_code const & ec, size_t size_a, nano::block_type type_a)
{
if (!ec)
{
nano::bufferstream stream (receive_buffer->data (), size_a);
auto block (nano::deserialize_block (stream, type_a));
if (block != nullptr && !nano::work_validate (*block))
{
if (!connection->node->block_processor.full ())
{
connection->node->process_active (std::move (block));
}
receive ();
}
else
{
if (connection->node->config.logging.bulk_pull_logging ())
{
connection->node->logger.try_log ("Error deserializing block received from pull request");
}
}
}
}
nano::frontier_req_server::frontier_req_server (std::shared_ptr<nano::bootstrap_server> const & connection_a, std::unique_ptr<nano::frontier_req> request_a) :
connection (connection_a),
current (request_a->start.number () - 1),
frontier (0),
request (std::move (request_a)),
send_buffer (std::make_shared<std::vector<uint8_t>> ()),
count (0)
{
next ();
}
void nano::frontier_req_server::send_next ()
{
if (!current.is_zero () && count < request->count)
{
{
send_buffer->clear ();
nano::vectorstream stream (*send_buffer);
write (stream, current.bytes);
write (stream, frontier.bytes);
}
auto this_l (shared_from_this ());
if (connection->node->config.logging.bulk_pull_logging ())
{
connection->node->logger.try_log (boost::str (boost::format ("Sending frontier for %1% %2%") % current.to_account () % frontier.to_string ()));
}
next ();
connection->socket->async_write (send_buffer, [this_l](boost::system::error_code const & ec, size_t size_a) {
this_l->sent_action (ec, size_a);
});
}
else
{
send_finished ();
}
}
void nano::frontier_req_server::send_finished ()
{
{
send_buffer->clear ();
nano::vectorstream stream (*send_buffer);
nano::uint256_union zero (0);
write (stream, zero.bytes);
write (stream, zero.bytes);
}
auto this_l (shared_from_this ());
if (connection->node->config.logging.network_logging ())
{
connection->node->logger.try_log ("Frontier sending finished");
}
connection->socket->async_write (send_buffer, [this_l](boost::system::error_code const & ec, size_t size_a) {
this_l->no_block_sent (ec, size_a);
});
}
void nano::frontier_req_server::no_block_sent (boost::system::error_code const & ec, size_t size_a)
{
if (!ec)
{
connection->finish_request ();
}
else
{
if (connection->node->config.logging.network_logging ())
{
connection->node->logger.try_log (boost::str (boost::format ("Error sending frontier finish: %1%") % ec.message ()));
}
}
}
void nano::frontier_req_server::sent_action (boost::system::error_code const & ec, size_t size_a)
{
if (!ec)
{
count++;
send_next ();
}
else
{
if (connection->node->config.logging.network_logging ())
{
connection->node->logger.try_log (boost::str (boost::format ("Error sending frontier pair: %1%") % ec.message ()));
}
}
}
void nano::frontier_req_server::next ()
{
// Filling accounts deque to prevent often read transactions
if (accounts.empty ())
{
auto now (nano::seconds_since_epoch ());
bool skip_old (request->age != std::numeric_limits<decltype (request->age)>::max ());
size_t max_size (128);
auto transaction (connection->node->store.tx_begin_read ());
for (auto i (connection->node->store.latest_begin (transaction, current.number () + 1)), n (connection->node->store.latest_end ()); i != n && accounts.size () != max_size; ++i)
{
nano::account_info const & info (i->second);
if (!skip_old || (now - info.modified) <= request->age)
{
nano::account const & account (i->first);
accounts.emplace_back (account, info.head);
}
}
/* If loop breaks before max_size, then latest_end () is reached
Add empty record to finish frontier_req_server */
if (accounts.size () != max_size)
{
accounts.emplace_back (nano::account (0), nano::block_hash (0));
}
}
// Retrieving accounts from deque
auto const & account_pair (accounts.front ());
current = account_pair.first;
frontier = account_pair.second;
accounts.pop_front ();
}
void nano::pulls_cache::add (nano::pull_info const & pull_a)
{
if (pull_a.processed > 500)
{
std::lock_guard<std::mutex> guard (pulls_cache_mutex);
// Clean old pull
if (cache.size () > cache_size_max)
{
cache.erase (cache.begin ());
}
assert (cache.size () <= cache_size_max);
nano::uint512_union head_512 (pull_a.account, pull_a.head_original);
auto existing (cache.get<account_head_tag> ().find (head_512));
if (existing == cache.get<account_head_tag> ().end ())
{
// Insert new pull
auto inserted (cache.insert (nano::cached_pulls{ std::chrono::steady_clock::now (), head_512, pull_a.head }));
(void)inserted;
assert (inserted.second);
}
else
{
// Update existing pull
cache.get<account_head_tag> ().modify (existing, [pull_a](nano::cached_pulls & cache_a) {
cache_a.time = std::chrono::steady_clock::now ();
cache_a.new_head = pull_a.head;
});
}
}
}
void nano::pulls_cache::update_pull (nano::pull_info & pull_a)
{
std::lock_guard<std::mutex> guard (pulls_cache_mutex);
nano::uint512_union head_512 (pull_a.account, pull_a.head_original);
auto existing (cache.get<account_head_tag> ().find (head_512));
if (existing != cache.get<account_head_tag> ().end ())
{
pull_a.head = existing->new_head;
}
}
void nano::pulls_cache::remove (nano::pull_info const & pull_a)
{
std::lock_guard<std::mutex> guard (pulls_cache_mutex);
nano::uint512_union head_512 (pull_a.account, pull_a.head_original);
cache.get<account_head_tag> ().erase (head_512);
}
namespace nano
{
std::unique_ptr<seq_con_info_component> collect_seq_con_info (pulls_cache & pulls_cache, const std::string & name)
{
size_t cache_count = 0;
{
std::lock_guard<std::mutex> guard (pulls_cache.pulls_cache_mutex);
cache_count = pulls_cache.cache.size ();
}
auto sizeof_element = sizeof (decltype (pulls_cache.cache)::value_type);
auto composite = std::make_unique<seq_con_info_composite> (name);
composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "pulls_cache", cache_count, sizeof_element }));
return composite;
}
}
| 1 | 15,771 | Maybe a good opportunity to do a related improvement: The parameter `total_blocks` in `bootstrap_attempt::process_block` shadows a field with the same name, so maybe rename the parameter to `total_blocks_a` The parameter and field are incremented at the same time, but the field is used by `populate_connections` as well, so I assume they mean different things. Other than that, LGTM | nanocurrency-nano-node | cpp |
@@ -9,6 +9,8 @@
DEFINE_int32(default_parts_num, 100, "The default number of parts when a space is created");
DEFINE_int32(default_replica_factor, 1, "The default replica factor when a space is created");
+DEFINE_int32(default_space_sum, 100, "The default number of space can be created");
+DEFINE_int32(default_max_replica_factor, 1, "The default max replica factor that a space can take");
namespace nebula {
namespace meta { | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "meta/processors/partsMan/CreateSpaceProcessor.h"
#include "meta/ActiveHostsMan.h"
DEFINE_int32(default_parts_num, 100, "The default number of parts when a space is created");
DEFINE_int32(default_replica_factor, 1, "The default replica factor when a space is created");
namespace nebula {
namespace meta {
void CreateSpaceProcessor::process(const cpp2::CreateSpaceReq& req) {
folly::SharedMutex::WriteHolder wHolder(LockUtils::spaceLock());
auto properties = req.get_properties();
auto spaceRet = getSpaceId(properties.get_space_name());
if (spaceRet.ok()) {
cpp2::ErrorCode ret;
if (req.get_if_not_exists()) {
ret = cpp2::ErrorCode::SUCCEEDED;
} else {
LOG(ERROR) << "Create Space Failed : Space " << properties.get_space_name()
<< " have existed!";
ret = cpp2::ErrorCode::E_EXISTED;
}
resp_.set_id(to(spaceRet.value(), EntryType::SPACE));
handleErrorCode(ret);
onFinished();
return;
}
CHECK_EQ(Status::SpaceNotFound(), spaceRet.status());
auto hosts = ActiveHostsMan::getActiveHosts(kvstore_);
if (hosts.empty()) {
LOG(ERROR) << "Create Space Failed : No Hosts!";
handleErrorCode(cpp2::ErrorCode::E_NO_HOSTS);
onFinished();
return;
}
auto idRet = autoIncrementId();
if (!nebula::ok(idRet)) {
LOG(ERROR) << "Create Space Failed : Get space id failed";
handleErrorCode(nebula::error(idRet));
onFinished();
return;
}
auto spaceId = nebula::value(idRet);
auto spaceName = properties.get_space_name();
auto partitionNum = properties.get_partition_num();
auto replicaFactor = properties.get_replica_factor();
auto charsetName = properties.get_charset_name();
auto collateName = properties.get_collate_name();
// Use default values or values from meta's configuration file
if (partitionNum == 0) {
partitionNum = FLAGS_default_parts_num;
if (partitionNum <= 0) {
LOG(ERROR) << "Create Space Failed : partition_num is illegal!";
resp_.set_code(cpp2::ErrorCode::E_INVALID_PARTITION_NUM);
onFinished();
return;
}
// Set the default value back to the struct, which will be written to storage
properties.set_partition_num(partitionNum);
}
if (replicaFactor == 0) {
replicaFactor = FLAGS_default_replica_factor;
if (replicaFactor <= 0) {
LOG(ERROR) << "Create Space Failed : replicaFactor is illegal!";
resp_.set_code(cpp2::ErrorCode::E_INVALID_REPLICA_FACTOR);
onFinished();
return;
}
// Set the default value back to the struct, which will be written to storage
properties.set_replica_factor(replicaFactor);
}
VLOG(3) << "Create space " << spaceName << ", id " << spaceId;
if ((int32_t)hosts.size() < replicaFactor) {
LOG(ERROR) << "Not enough hosts existed for replica "
<< replicaFactor << ", hosts num " << hosts.size();
handleErrorCode(cpp2::ErrorCode::E_UNSUPPORTED);
onFinished();
return;
}
std::vector<kvstore::KV> data;
data.emplace_back(MetaServiceUtils::indexSpaceKey(spaceName),
std::string(reinterpret_cast<const char*>(&spaceId), sizeof(spaceId)));
data.emplace_back(MetaServiceUtils::spaceKey(spaceId),
MetaServiceUtils::spaceVal(properties));
for (auto partId = 1; partId <= partitionNum; partId++) {
auto partHosts = pickHosts(partId, hosts, replicaFactor);
data.emplace_back(MetaServiceUtils::partKey(spaceId, partId),
MetaServiceUtils::partVal(partHosts));
}
handleErrorCode(cpp2::ErrorCode::SUCCEEDED);
resp_.set_id(to(spaceId, EntryType::SPACE));
doSyncPutAndUpdate(std::move(data));
}
std::vector<nebula::cpp2::HostAddr>
CreateSpaceProcessor::pickHosts(PartitionID partId,
const std::vector<HostAddr>& hosts,
int32_t replicaFactor) {
if (hosts.empty()) {
return std::vector<nebula::cpp2::HostAddr>();
}
auto startIndex = partId;
std::vector<nebula::cpp2::HostAddr> pickedHosts;
for (decltype(replicaFactor) i = 0; i < replicaFactor; i++) {
pickedHosts.emplace_back(toThriftHost(hosts[startIndex++ % hosts.size()]));
}
return pickedHosts;
}
} // namespace meta
} // namespace nebula
| 1 | 28,336 | Please revise the naming of these two flags. _**default**_ is the value we take when user doesn't provide. You could infer from the fact how `default_parts_num` are used. | vesoft-inc-nebula | cpp |
@@ -24,12 +24,13 @@ import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
-public class ImportTypeTransformer {
+public class ImportTypeTransformer implements ImportGenerator {
private enum ImportFileType {
SERVICE_FILE,
PROTO_FILE
}
+ @Override
public List<ImportTypeView> generateImports(Map<String, TypeAlias> imports) {
List<ImportTypeView> generatedImports = new ArrayList<>();
for (String key : imports.keySet()) { | 1 | /* Copyright 2016 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.transformer;
import com.google.api.codegen.util.TypeAlias;
import com.google.api.codegen.viewmodel.ImportTypeView;
import com.google.api.tools.framework.model.Interface;
import com.google.api.tools.framework.model.Method;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
public class ImportTypeTransformer {
private enum ImportFileType {
SERVICE_FILE,
PROTO_FILE
}
public List<ImportTypeView> generateImports(Map<String, TypeAlias> imports) {
List<ImportTypeView> generatedImports = new ArrayList<>();
for (String key : imports.keySet()) {
TypeAlias value = imports.get(key);
generatedImports.add(
ImportTypeView.newBuilder()
.fullName(key)
.nickname(value.getNickname())
.type(value.getImportType())
.build());
}
return generatedImports;
}
public List<ImportTypeView> generateServiceFileImports(SurfaceTransformerContext context) {
return generateFileImports(context, ImportFileType.SERVICE_FILE);
}
public List<ImportTypeView> generateProtoFileImports(SurfaceTransformerContext context) {
return generateFileImports(context, ImportFileType.PROTO_FILE);
}
private List<ImportTypeView> generateFileImports(
SurfaceTransformerContext context, ImportFileType importFileType) {
SurfaceNamer namer = context.getNamer();
Set<String> fullNames = new TreeSet<>();
fullNames.add(getFileImport(context.getInterface(), namer, importFileType));
for (Method method : context.getSupportedMethods()) {
Interface targetInterface = context.asRequestMethodContext(method).getTargetInterface();
fullNames.add(getFileImport(targetInterface, namer, importFileType));
}
List<ImportTypeView> imports = new ArrayList<>();
for (String name : fullNames) {
ImportTypeView.Builder builder = ImportTypeView.newBuilder();
builder.fullName(name);
builder.nickname("");
imports.add(builder.build());
}
return imports;
}
private String getFileImport(
Interface service, SurfaceNamer namer, ImportFileType importFileType) {
return importFileType == ImportFileType.SERVICE_FILE
? namer.getServiceFileImportFromService(service)
: namer.getProtoFileImportFromService(service);
}
}
| 1 | 19,184 | I don't think we should have separate "transformer" and "generator" terms. I would suggest renaming this existing class `ImportTypeTransformer` to `StandardImportTypeTransformer` and renaming the interface `ImportGenerator` to `ImportTypeTransformer`. | googleapis-gapic-generator | java |
@@ -1,4 +1,4 @@
export { default as getContainers } from './get-containers';
export { default as isValidAccountID } from './is-valid-account-id';
export { default as isValidContainerID } from './is-valid-container-id';
-export { default as tagMatchers } from './tag-matchers';
+export { default as tagMatchers } from './tagMatchers'; | 1 | export { default as getContainers } from './get-containers';
export { default as isValidAccountID } from './is-valid-account-id';
export { default as isValidContainerID } from './is-valid-container-id';
export { default as tagMatchers } from './tag-matchers';
| 1 | 30,044 | I believe `tag-matchers` is the proper form for this filename. Only components should use pascal case, I believe everything else should continue to use kabob-case. | google-site-kit-wp | js |
@@ -19,6 +19,7 @@ package pubsub
import (
"context"
"fmt"
+ "github.com/google/knative-gcp/pkg/apis/duck/v1alpha1"
pubsubsourcev1alpha1 "github.com/google/knative-gcp/pkg/apis/pubsub/v1alpha1"
pubsubsourceclientset "github.com/google/knative-gcp/pkg/client/clientset/versioned" | 1 | /*
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pubsub
import (
"context"
"fmt"
pubsubsourcev1alpha1 "github.com/google/knative-gcp/pkg/apis/pubsub/v1alpha1"
pubsubsourceclientset "github.com/google/knative-gcp/pkg/client/clientset/versioned"
"github.com/google/knative-gcp/pkg/duck"
"github.com/google/knative-gcp/pkg/reconciler"
"github.com/google/knative-gcp/pkg/reconciler/pubsub/resources"
"go.uber.org/zap"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/apis"
"knative.dev/pkg/logging"
)
type PubSubBase struct {
*reconciler.Base
// For dealing with Topics and Pullsubscriptions
pubsubClient pubsubsourceclientset.Interface
// What do we tag receive adapter as.
receiveAdapterName string
// What type of receive adapter to use.
adapterType string
}
// ReconcilePubSub reconciles Topic / PullSubscription given a PubSubSpec.
// Sets the following Conditions in the Status field appropriately:
// "TopicReady", and "PullSubscriptionReady"
// Also sets the following fields in the pubsubable.Status upon success
// TopicID, ProjectID, and SinkURI
func (psb *PubSubBase) ReconcilePubSub(ctx context.Context, pubsubable duck.PubSubable, topic, resourceGroup string) (*pubsubsourcev1alpha1.Topic, *pubsubsourcev1alpha1.PullSubscription, error) {
if pubsubable == nil {
return nil, nil, fmt.Errorf("nil pubsubable passed in")
}
namespace := pubsubable.GetObjectMeta().GetNamespace()
name := pubsubable.GetObjectMeta().GetName()
spec := pubsubable.PubSubSpec()
status := pubsubable.PubSubStatus()
topics := psb.pubsubClient.PubsubV1alpha1().Topics(namespace)
t, err := topics.Get(name, v1.GetOptions{})
if err != nil {
if !apierrs.IsNotFound(err) {
logging.FromContext(ctx).Desugar().Error("Failed to get Topics", zap.Error(err))
return nil, nil, fmt.Errorf("failed to get Topics: %w", err)
}
newTopic := resources.MakeTopic(namespace, name, spec, pubsubable, topic, psb.receiveAdapterName)
t, err = topics.Create(newTopic)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to create Topic", zap.Any("topic", newTopic), zap.Error(err))
return nil, nil, fmt.Errorf("failed to create Topic: %w", err)
}
}
cs := pubsubable.ConditionSet()
if !t.Status.IsReady() {
status.MarkTopicNotReady(cs, "TopicNotReady", "Topic %q not ready", t.Name)
return t, nil, fmt.Errorf("Topic %q not ready", t.Name)
}
if t.Status.ProjectID == "" {
status.MarkTopicNotReady(cs, "TopicNotReady", "Topic %q did not expose projectid", t.Name)
return t, nil, fmt.Errorf("Topic %q did not expose projectid", t.Name)
}
if t.Status.TopicID == "" {
status.MarkTopicNotReady(cs, "TopicNotReady", "Topic %q did not expose topicid", t.Name)
return t, nil, fmt.Errorf("Topic %q did not expose topicid", t.Name)
}
if t.Status.TopicID != topic {
status.MarkTopicNotReady(cs, "TopicNotReady", "Topic %q mismatch: expected %q got %q", t.Name, topic, t.Status.TopicID)
return t, nil, fmt.Errorf("Topic %q mismatch: expected %q got %q", t.Name, topic, t.Status.TopicID)
}
status.TopicID = t.Status.TopicID
status.ProjectID = t.Status.ProjectID
status.MarkTopicReady(cs)
// Ok, so the Topic is ready, let's reconcile PullSubscription.
pullSubscriptions := psb.pubsubClient.PubsubV1alpha1().PullSubscriptions(namespace)
ps, err := pullSubscriptions.Get(name, v1.GetOptions{})
if err != nil {
if !apierrs.IsNotFound(err) {
logging.FromContext(ctx).Desugar().Error("Failed to get PullSubscription", zap.Error(err))
return t, nil, fmt.Errorf("failed to get Pullsubscription: %w", err)
}
newPS := resources.MakePullSubscription(namespace, name, spec, pubsubable, topic, psb.receiveAdapterName, psb.adapterType, resourceGroup)
ps, err = pullSubscriptions.Create(newPS)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to create PullSubscription", zap.Any("ps", newPS), zap.Error(err))
return t, nil, fmt.Errorf("failed to create PullSubscription: %w", err)
}
}
if !ps.Status.IsReady() {
status.MarkPullSubscriptionNotReady(cs, "PullSubscriptionNotReady", "PullSubscription %q not ready", ps.Name)
return t, ps, fmt.Errorf("PullSubscription %q not ready", ps.Name)
} else {
status.MarkPullSubscriptionReady(cs)
}
uri, err := apis.ParseURL(ps.Status.SinkURI)
if err != nil {
return t, ps, fmt.Errorf("failed to parse url %q: %w", ps.Status.SinkURI, err)
}
status.SinkURI = uri
return t, ps, nil
}
func (psb *PubSubBase) DeletePubSub(ctx context.Context, pubsubable duck.PubSubable) error {
if pubsubable == nil {
return fmt.Errorf("nil pubsubable passed in")
}
namespace := pubsubable.GetObjectMeta().GetNamespace()
name := pubsubable.GetObjectMeta().GetName()
status := pubsubable.PubSubStatus()
cs := pubsubable.ConditionSet()
// Delete the topic
err := psb.pubsubClient.PubsubV1alpha1().Topics(namespace).Delete(name, nil)
if err != nil && !apierrs.IsNotFound(err) {
logging.FromContext(ctx).Desugar().Error("Failed to delete Topic", zap.String("name", name), zap.Error(err))
status.MarkTopicNotReady(cs, "TopicDeleteFailed", "Failed to delete Topic: %s", err.Error())
return fmt.Errorf("failed to delete topic: %w", err)
}
status.MarkTopicNotReady(cs, "TopicDeleted", "Successfully deleted Topic: %s", name)
status.TopicID = ""
status.ProjectID = ""
// Delete the pullsubscription
err = psb.pubsubClient.PubsubV1alpha1().PullSubscriptions(namespace).Delete(name, nil)
if err != nil && !apierrs.IsNotFound(err) {
logging.FromContext(ctx).Desugar().Error("Failed to delete PullSubscription", zap.String("name", name), zap.Error(err))
status.MarkPullSubscriptionNotReady(cs, "PullSubscriptionDeleteFailed", "Failed to delete PullSubscription: %s", err.Error())
return fmt.Errorf("failed to delete PullSubscription: %w", err)
}
status.MarkPullSubscriptionNotReady(cs, "PullSubscriptionDeleted", "Successfully deleted PullSubscription: %s", name)
status.SinkURI = nil
return nil
}
| 1 | 10,340 | same here, move this one down | google-knative-gcp | go |
@@ -112,6 +112,7 @@ class OrderDataFixture extends AbstractReferenceFixture implements DependentFixt
$orderData->domainId = Domain::FIRST_DOMAIN_ID;
$orderData->currency = $this->getReference(CurrencyDataFixture::CURRENCY_CZK);
$orderData->createdAt = $this->faker->dateTimeBetween('-1 week', 'now');
+ $orderData->createdAsAdministrator = $this->getReference(AdministratorDataFixture::ADMINISTRATOR);
$this->createOrder(
$orderData,
[ | 1 | <?php
namespace Shopsys\FrameworkBundle\DataFixtures\Demo;
use Doctrine\Common\DataFixtures\DependentFixtureInterface;
use Doctrine\Common\Persistence\ObjectManager;
use Faker\Generator;
use Shopsys\FrameworkBundle\Component\DataFixture\AbstractReferenceFixture;
use Shopsys\FrameworkBundle\Component\Domain\Domain;
use Shopsys\FrameworkBundle\Model\Customer\User;
use Shopsys\FrameworkBundle\Model\Customer\UserRepository;
use Shopsys\FrameworkBundle\Model\Order\Item\QuantifiedProduct;
use Shopsys\FrameworkBundle\Model\Order\OrderData;
use Shopsys\FrameworkBundle\Model\Order\OrderDataFactoryInterface;
use Shopsys\FrameworkBundle\Model\Order\OrderFacade;
use Shopsys\FrameworkBundle\Model\Order\Preview\OrderPreviewFactory;
class OrderDataFixture extends AbstractReferenceFixture implements DependentFixtureInterface
{
const ORDER_PREFIX = 'order_';
/**
* @var \Shopsys\FrameworkBundle\Model\Customer\UserRepository
*/
private $userRepository;
/**
* @var \Faker\Generator
*/
private $faker;
/**
* @var \Shopsys\FrameworkBundle\Model\Order\OrderFacade
*/
private $orderFacade;
/**
* @var \Shopsys\FrameworkBundle\Model\Order\Preview\OrderPreviewFactory
*/
private $orderPreviewFactory;
/**
* @var \Shopsys\FrameworkBundle\Model\Order\OrderDataFactoryInterface
*/
private $orderDataFactory;
/**
* @param \Shopsys\FrameworkBundle\Model\Customer\UserRepository $userRepository
* @param \Faker\Generator $faker
* @param \Shopsys\FrameworkBundle\Model\Order\OrderFacade $orderFacade
* @param \Shopsys\FrameworkBundle\Model\Order\Preview\OrderPreviewFactory $orderPreviewFactory
* @param \Shopsys\FrameworkBundle\Model\Order\OrderDataFactoryInterface $orderDataFactory
*/
public function __construct(
UserRepository $userRepository,
Generator $faker,
OrderFacade $orderFacade,
OrderPreviewFactory $orderPreviewFactory,
OrderDataFactoryInterface $orderDataFactory
) {
$this->userRepository = $userRepository;
$this->faker = $faker;
$this->orderFacade = $orderFacade;
$this->orderPreviewFactory = $orderPreviewFactory;
$this->orderDataFactory = $orderDataFactory;
}
/**
* @param \Doctrine\Common\Persistence\ObjectManager $manager
*/
public function load(ObjectManager $manager)
{
$user = $this->userRepository->findUserByEmailAndDomain('[email protected]', 1);
$orderData = $this->orderDataFactory->create();
$orderData->transport = $this->getReference(TransportDataFixture::TRANSPORT_PERSONAL);
$orderData->payment = $this->getReference(PaymentDataFixture::PAYMENT_CASH);
$orderData->status = $this->getReference(OrderStatusDataFixture::ORDER_STATUS_DONE);
$orderData->firstName = 'Jiří';
$orderData->lastName = 'Ševčík';
$orderData->email = '[email protected]';
$orderData->telephone = '+420369554147';
$orderData->street = 'První 1';
$orderData->city = 'Ostrava';
$orderData->postcode = '71200';
$orderData->country = $this->getReference(CountryDataFixture::COUNTRY_CZECH_REPUBLIC_1);
$orderData->deliveryAddressSameAsBillingAddress = true;
$orderData->domainId = Domain::FIRST_DOMAIN_ID;
$orderData->currency = $this->getReference(CurrencyDataFixture::CURRENCY_CZK);
$orderData->createdAt = $this->faker->dateTimeBetween('-1 week', 'now');
$this->createOrder(
$orderData,
[
ProductDataFixture::PRODUCT_PREFIX . '9' => 2,
ProductDataFixture::PRODUCT_PREFIX . '10' => 3,
],
$user
);
$orderData = $this->orderDataFactory->create();
$orderData->transport = $this->getReference(TransportDataFixture::TRANSPORT_PERSONAL);
$orderData->payment = $this->getReference(PaymentDataFixture::PAYMENT_CARD);
$orderData->status = $this->getReference(OrderStatusDataFixture::ORDER_STATUS_NEW);
$orderData->firstName = 'Iva';
$orderData->lastName = 'Jačková';
$orderData->email = '[email protected]';
$orderData->telephone = '+420367852147';
$orderData->street = 'Druhá 2';
$orderData->city = 'Ostrava';
$orderData->postcode = '71300';
$orderData->country = $this->getReference(CountryDataFixture::COUNTRY_SLOVAKIA_1);
$orderData->deliveryAddressSameAsBillingAddress = true;
$orderData->domainId = Domain::FIRST_DOMAIN_ID;
$orderData->currency = $this->getReference(CurrencyDataFixture::CURRENCY_CZK);
$orderData->createdAt = $this->faker->dateTimeBetween('-1 week', 'now');
$this->createOrder(
$orderData,
[
ProductDataFixture::PRODUCT_PREFIX . '18' => 2,
ProductDataFixture::PRODUCT_PREFIX . '19' => 1,
ProductDataFixture::PRODUCT_PREFIX . '20' => 1,
ProductDataFixture::PRODUCT_PREFIX . '15' => 5,
],
$user
);
$orderData = $this->orderDataFactory->create();
$orderData->transport = $this->getReference(TransportDataFixture::TRANSPORT_CZECH_POST);
$orderData->payment = $this->getReference(PaymentDataFixture::PAYMENT_CASH_ON_DELIVERY);
$orderData->status = $this->getReference(OrderStatusDataFixture::ORDER_STATUS_NEW);
$orderData->firstName = 'Jan';
$orderData->lastName = 'Adamovský';
$orderData->email = '[email protected]';
$orderData->telephone = '+420725852147';
$orderData->street = 'Třetí 3';
$orderData->city = 'Ostrava';
$orderData->postcode = '71200';
$orderData->country = $this->getReference(CountryDataFixture::COUNTRY_CZECH_REPUBLIC_1);
$orderData->deliveryAddressSameAsBillingAddress = true;
$orderData->domainId = Domain::FIRST_DOMAIN_ID;
$orderData->currency = $this->getReference(CurrencyDataFixture::CURRENCY_CZK);
$orderData->createdAt = $this->faker->dateTimeBetween('-1 week', 'now');
$this->createOrder(
$orderData,
[
ProductDataFixture::PRODUCT_PREFIX . '4' => 6,
ProductDataFixture::PRODUCT_PREFIX . '11' => 1,
],
$user
);
$orderData = $this->orderDataFactory->create();
$orderData->transport = $this->getReference(TransportDataFixture::TRANSPORT_PPL);
$orderData->payment = $this->getReference(PaymentDataFixture::PAYMENT_CARD);
$orderData->status = $this->getReference(OrderStatusDataFixture::ORDER_STATUS_IN_PROGRESS);
$orderData->firstName = 'Iveta';
$orderData->lastName = 'Prvá';
$orderData->email = '[email protected]';
$orderData->telephone = '+420606952147';
$orderData->street = 'Čtvrtá 4';
$orderData->city = 'Ostrava';
$orderData->postcode = '70030';
$orderData->country = $this->getReference(CountryDataFixture::COUNTRY_SLOVAKIA_1);
$orderData->deliveryAddressSameAsBillingAddress = true;
$orderData->domainId = Domain::FIRST_DOMAIN_ID;
$orderData->currency = $this->getReference(CurrencyDataFixture::CURRENCY_CZK);
$orderData->createdAt = $this->faker->dateTimeBetween('-1 week', 'now');
$this->createOrder(
$orderData,
[
ProductDataFixture::PRODUCT_PREFIX . '1' => 1,
],
$user
);
$orderData = $this->orderDataFactory->create();
$orderData->transport = $this->getReference(TransportDataFixture::TRANSPORT_PERSONAL);
$orderData->payment = $this->getReference(PaymentDataFixture::PAYMENT_CASH);
$orderData->status = $this->getReference(OrderStatusDataFixture::ORDER_STATUS_DONE);
$orderData->firstName = 'Jana';
$orderData->lastName = 'Janíčková';
$orderData->email = '[email protected]';
$orderData->telephone = '+420739852148';
$orderData->street = 'Pátá 55';
$orderData->city = 'Ostrava';
$orderData->postcode = '71200';
$orderData->country = $this->getReference(CountryDataFixture::COUNTRY_CZECH_REPUBLIC_1);
$orderData->deliveryAddressSameAsBillingAddress = true;
$orderData->domainId = Domain::FIRST_DOMAIN_ID;
$orderData->currency = $this->getReference(CurrencyDataFixture::CURRENCY_CZK);
$orderData->createdAt = $this->faker->dateTimeBetween('-1 week', 'now');
$this->createOrder(
$orderData,
[
ProductDataFixture::PRODUCT_PREFIX . '2' => 8,
ProductDataFixture::PRODUCT_PREFIX . '3' => 1,
ProductDataFixture::PRODUCT_PREFIX . '1' => 2,
],
$user
);
$orderData = $this->orderDataFactory->create();
$orderData->transport = $this->getReference(TransportDataFixture::TRANSPORT_PPL);
$orderData->payment = $this->getReference(PaymentDataFixture::PAYMENT_CARD);
$orderData->status = $this->getReference(OrderStatusDataFixture::ORDER_STATUS_NEW);
$orderData->firstName = 'Dominik';
$orderData->lastName = 'Hašek';
$orderData->email = '[email protected]';
$orderData->telephone = '+420721852152';
$orderData->street = 'Šestá 39';
$orderData->city = 'Pardubice';
$orderData->postcode = '58941';
$orderData->country = $this->getReference(CountryDataFixture::COUNTRY_SLOVAKIA_1);
$orderData->deliveryAddressSameAsBillingAddress = true;
$orderData->domainId = Domain::FIRST_DOMAIN_ID;
$orderData->currency = $this->getReference(CurrencyDataFixture::CURRENCY_CZK);
$orderData->createdAt = $this->faker->dateTimeBetween('-1 week', 'now');
$this->createOrder(
$orderData,
[
ProductDataFixture::PRODUCT_PREFIX . '13' => 2,
ProductDataFixture::PRODUCT_PREFIX . '14' => 1,
ProductDataFixture::PRODUCT_PREFIX . '15' => 1,
ProductDataFixture::PRODUCT_PREFIX . '16' => 1,
ProductDataFixture::PRODUCT_PREFIX . '17' => 1,
ProductDataFixture::PRODUCT_PREFIX . '18' => 1,
],
$user
);
$orderData = $this->orderDataFactory->create();
$orderData->transport = $this->getReference(TransportDataFixture::TRANSPORT_PERSONAL);
$orderData->payment = $this->getReference(PaymentDataFixture::PAYMENT_CASH);
$orderData->status = $this->getReference(OrderStatusDataFixture::ORDER_STATUS_CANCELED);
$orderData->firstName = 'Jiří';
$orderData->lastName = 'Sovák';
$orderData->email = '[email protected]';
$orderData->telephone = '+420755872155';
$orderData->street = 'Sedmá 1488';
$orderData->city = 'Opava';
$orderData->postcode = '85741';
$orderData->country = $this->getReference(CountryDataFixture::COUNTRY_CZECH_REPUBLIC_1);
$orderData->deliveryAddressSameAsBillingAddress = true;
$orderData->domainId = Domain::FIRST_DOMAIN_ID;
$orderData->currency = $this->getReference(CurrencyDataFixture::CURRENCY_CZK);
$orderData->createdAt = $this->faker->dateTimeBetween('-1 week', 'now');
$this->createOrder(
$orderData,
[
ProductDataFixture::PRODUCT_PREFIX . '7' => 1,
ProductDataFixture::PRODUCT_PREFIX . '8' => 1,
ProductDataFixture::PRODUCT_PREFIX . '12' => 2,
]
);
$orderData = $this->orderDataFactory->create();
$orderData->transport = $this->getReference(TransportDataFixture::TRANSPORT_CZECH_POST);
$orderData->payment = $this->getReference(PaymentDataFixture::PAYMENT_CASH_ON_DELIVERY);
$orderData->status = $this->getReference(OrderStatusDataFixture::ORDER_STATUS_DONE);
$orderData->firstName = 'Josef';
$orderData->lastName = 'Somr';
$orderData->email = '[email protected]';
$orderData->telephone = '+420369852147';
$orderData->street = 'Osmá 1';
$orderData->city = 'Praha';
$orderData->postcode = '30258';
$orderData->country = $this->getReference(CountryDataFixture::COUNTRY_SLOVAKIA_1);
$orderData->deliveryAddressSameAsBillingAddress = true;
$orderData->domainId = Domain::FIRST_DOMAIN_ID;
$orderData->currency = $this->getReference(CurrencyDataFixture::CURRENCY_CZK);
$orderData->createdAt = $this->faker->dateTimeBetween('-1 week', 'now');
$this->createOrder(
$orderData,
[
ProductDataFixture::PRODUCT_PREFIX . '1' => 6,
ProductDataFixture::PRODUCT_PREFIX . '2' => 1,
ProductDataFixture::PRODUCT_PREFIX . '12' => 1,
]
);
$orderData = $this->orderDataFactory->create();
$orderData->transport = $this->getReference(TransportDataFixture::TRANSPORT_PERSONAL);
$orderData->payment = $this->getReference(PaymentDataFixture::PAYMENT_CASH);
$orderData->status = $this->getReference(OrderStatusDataFixture::ORDER_STATUS_CANCELED);
$orderData->firstName = 'Ivan';
$orderData->lastName = 'Horník';
$orderData->email = '[email protected]';
$orderData->telephone = '+420755496328';
$orderData->street = 'Desátá 10';
$orderData->city = 'Plzeň';
$orderData->postcode = '30010';
$orderData->country = $this->getReference(CountryDataFixture::COUNTRY_CZECH_REPUBLIC_1);
$orderData->deliveryAddressSameAsBillingAddress = true;
$orderData->domainId = Domain::FIRST_DOMAIN_ID;
$orderData->currency = $this->getReference(CurrencyDataFixture::CURRENCY_CZK);
$orderData->createdAt = $this->faker->dateTimeBetween('-1 week', 'now');
$this->createOrder(
$orderData,
[
ProductDataFixture::PRODUCT_PREFIX . '9' => 3,
ProductDataFixture::PRODUCT_PREFIX . '13' => 2,
]
);
$orderData = $this->orderDataFactory->create();
$orderData->transport = $this->getReference(TransportDataFixture::TRANSPORT_PPL);
$orderData->payment = $this->getReference(PaymentDataFixture::PAYMENT_CARD);
$orderData->status = $this->getReference(OrderStatusDataFixture::ORDER_STATUS_NEW);
$orderData->firstName = 'Adam';
$orderData->lastName = 'Bořič';
$orderData->email = '[email protected]';
$orderData->telephone = '+420987654321';
$orderData->street = 'Cihelní 5';
$orderData->city = 'Liberec';
$orderData->postcode = '65421';
$orderData->country = $this->getReference(CountryDataFixture::COUNTRY_CZECH_REPUBLIC_1);
$orderData->deliveryAddressSameAsBillingAddress = true;
$orderData->domainId = Domain::FIRST_DOMAIN_ID;
$orderData->currency = $this->getReference(CurrencyDataFixture::CURRENCY_CZK);
$orderData->createdAt = $this->faker->dateTimeBetween('-1 week', 'now');
$this->createOrder(
$orderData,
[
ProductDataFixture::PRODUCT_PREFIX . '3' => 1,
]
);
$orderData = $this->orderDataFactory->create();
$orderData->transport = $this->getReference(TransportDataFixture::TRANSPORT_PERSONAL);
$orderData->payment = $this->getReference(PaymentDataFixture::PAYMENT_CASH);
$orderData->status = $this->getReference(OrderStatusDataFixture::ORDER_STATUS_IN_PROGRESS);
$orderData->firstName = 'Evžen';
$orderData->lastName = 'Farný';
$orderData->email = '[email protected]';
$orderData->telephone = '+420456789123';
$orderData->street = 'Gagarinova 333';
$orderData->city = 'Hodonín';
$orderData->postcode = '69501';
$orderData->country = $this->getReference(CountryDataFixture::COUNTRY_SLOVAKIA_1);
$orderData->deliveryAddressSameAsBillingAddress = true;
$orderData->domainId = Domain::FIRST_DOMAIN_ID;
$orderData->currency = $this->getReference(CurrencyDataFixture::CURRENCY_CZK);
$orderData->createdAt = $this->faker->dateTimeBetween('-1 week', 'now');
$this->createOrder(
$orderData,
[
ProductDataFixture::PRODUCT_PREFIX . '1' => 1,
ProductDataFixture::PRODUCT_PREFIX . '2' => 1,
ProductDataFixture::PRODUCT_PREFIX . '3' => 1,
]
);
$orderData = $this->orderDataFactory->create();
$orderData->transport = $this->getReference(TransportDataFixture::TRANSPORT_PERSONAL);
$orderData->payment = $this->getReference(PaymentDataFixture::PAYMENT_CASH);
$orderData->status = $this->getReference(OrderStatusDataFixture::ORDER_STATUS_DONE);
$orderData->firstName = 'Ivana';
$orderData->lastName = 'Janečková';
$orderData->email = '[email protected]';
$orderData->telephone = '+420369852147';
$orderData->street = 'Kalužní 88';
$orderData->city = 'Lednice';
$orderData->postcode = '69144';
$orderData->country = $this->getReference(CountryDataFixture::COUNTRY_CZECH_REPUBLIC_1);
$orderData->deliveryAddressSameAsBillingAddress = true;
$orderData->domainId = Domain::FIRST_DOMAIN_ID;
$orderData->currency = $this->getReference(CurrencyDataFixture::CURRENCY_CZK);
$orderData->createdAt = $this->faker->dateTimeBetween('-1 week', 'now');
$this->createOrder(
$orderData,
[
ProductDataFixture::PRODUCT_PREFIX . '4' => 2,
ProductDataFixture::PRODUCT_PREFIX . '3' => 1,
]
);
$orderData = $this->orderDataFactory->create();
$orderData->transport = $this->getReference(TransportDataFixture::TRANSPORT_CZECH_POST);
$orderData->payment = $this->getReference(PaymentDataFixture::PAYMENT_CASH_ON_DELIVERY);
$orderData->status = $this->getReference(OrderStatusDataFixture::ORDER_STATUS_NEW);
$orderData->firstName = 'Pavel';
$orderData->lastName = 'Novák';
$orderData->email = '[email protected]';
$orderData->telephone = '+420605123654';
$orderData->street = 'Adresní 6';
$orderData->city = 'Opava';
$orderData->postcode = '72589';
$orderData->country = $this->getReference(CountryDataFixture::COUNTRY_SLOVAKIA_1);
$orderData->deliveryAddressSameAsBillingAddress = true;
$orderData->domainId = Domain::FIRST_DOMAIN_ID;
$orderData->currency = $this->getReference(CurrencyDataFixture::CURRENCY_CZK);
$orderData->createdAt = $this->faker->dateTimeBetween('-1 week', 'now');
$this->createOrder(
$orderData,
[
ProductDataFixture::PRODUCT_PREFIX . '10' => 1,
ProductDataFixture::PRODUCT_PREFIX . '20' => 4,
]
);
$orderData = $this->orderDataFactory->create();
$orderData->transport = $this->getReference(TransportDataFixture::TRANSPORT_PPL);
$orderData->payment = $this->getReference(PaymentDataFixture::PAYMENT_CARD);
$orderData->status = $this->getReference(OrderStatusDataFixture::ORDER_STATUS_DONE);
$orderData->firstName = 'Pavla';
$orderData->lastName = 'Adámková';
$orderData->email = '[email protected]';
$orderData->telephone = '+4206051836459';
$orderData->street = 'Výpočetni 16';
$orderData->city = 'Praha';
$orderData->postcode = '30015';
$orderData->country = $this->getReference(CountryDataFixture::COUNTRY_CZECH_REPUBLIC_1);
$orderData->deliveryAddressSameAsBillingAddress = true;
$orderData->domainId = Domain::FIRST_DOMAIN_ID;
$orderData->currency = $this->getReference(CurrencyDataFixture::CURRENCY_CZK);
$orderData->createdAt = $this->faker->dateTimeBetween('-1 week', 'now');
$this->createOrder(
$orderData,
[
ProductDataFixture::PRODUCT_PREFIX . '15' => 1,
ProductDataFixture::PRODUCT_PREFIX . '18' => 1,
ProductDataFixture::PRODUCT_PREFIX . '19' => 1,
ProductDataFixture::PRODUCT_PREFIX . '3' => 1,
]
);
$orderData = $this->orderDataFactory->create();
$orderData->transport = $this->getReference(TransportDataFixture::TRANSPORT_PERSONAL);
$orderData->payment = $this->getReference(PaymentDataFixture::PAYMENT_CASH);
$orderData->status = $this->getReference(OrderStatusDataFixture::ORDER_STATUS_IN_PROGRESS);
$orderData->firstName = 'Adam';
$orderData->lastName = 'Žitný';
$orderData->email = '[email protected]';
$orderData->telephone = '+4206051836459';
$orderData->street = 'Přímá 1';
$orderData->city = 'Plzeň';
$orderData->postcode = '30010';
$orderData->country = $this->getReference(CountryDataFixture::COUNTRY_SLOVAKIA_1);
$orderData->deliveryAddressSameAsBillingAddress = true;
$orderData->domainId = Domain::FIRST_DOMAIN_ID;
$orderData->currency = $this->getReference(CurrencyDataFixture::CURRENCY_CZK);
$orderData->createdAt = $this->faker->dateTimeBetween('-1 week', 'now');
$this->createOrder(
$orderData,
[
ProductDataFixture::PRODUCT_PREFIX . '9' => 1,
ProductDataFixture::PRODUCT_PREFIX . '19' => 1,
ProductDataFixture::PRODUCT_PREFIX . '6' => 1,
]
);
$orderData = $this->orderDataFactory->create();
$orderData->transport = $this->getReference(TransportDataFixture::TRANSPORT_PPL);
$orderData->payment = $this->getReference(PaymentDataFixture::PAYMENT_CARD);
$orderData->status = $this->getReference(OrderStatusDataFixture::ORDER_STATUS_NEW);
$orderData->firstName = 'Radim';
$orderData->lastName = 'Svátek';
$orderData->email = '[email protected]';
$orderData->telephone = '+420733598748';
$orderData->street = 'Křivá 11';
$orderData->city = 'Jablonec';
$orderData->postcode = '78952';
$orderData->country = $this->getReference(CountryDataFixture::COUNTRY_CZECH_REPUBLIC_1);
$orderData->deliveryAddressSameAsBillingAddress = true;
$orderData->companyName = 'BestCompanyEver, s.r.o.';
$orderData->companyNumber = '555555';
$orderData->note = 'Doufám, že vše dorazí v pořádku a co nejdříve :)';
$orderData->domainId = Domain::FIRST_DOMAIN_ID;
$orderData->currency = $this->getReference(CurrencyDataFixture::CURRENCY_CZK);
$orderData->createdAt = $this->faker->dateTimeBetween('-1 week', 'now');
$this->createOrder(
$orderData,
[
ProductDataFixture::PRODUCT_PREFIX . '7' => 1,
ProductDataFixture::PRODUCT_PREFIX . '17' => 6,
ProductDataFixture::PRODUCT_PREFIX . '9' => 1,
ProductDataFixture::PRODUCT_PREFIX . '14' => 1,
ProductDataFixture::PRODUCT_PREFIX . '10' => 2,
]
);
$orderData = $this->orderDataFactory->create();
$orderData->transport = $this->getReference(TransportDataFixture::TRANSPORT_PPL);
$orderData->payment = $this->getReference(PaymentDataFixture::PAYMENT_CARD);
$orderData->status = $this->getReference(OrderStatusDataFixture::ORDER_STATUS_NEW);
$orderData->firstName = 'Radim';
$orderData->lastName = 'Svátek';
$orderData->email = '[email protected]';
$orderData->telephone = '+420733598748';
$orderData->street = 'Křivá 11';
$orderData->city = 'Jablonec';
$orderData->postcode = '78952';
$orderData->country = $this->getReference(CountryDataFixture::COUNTRY_CZECH_REPUBLIC_1);
$orderData->deliveryAddressSameAsBillingAddress = false;
$orderData->deliveryCity = 'Ostrava';
$orderData->deliveryCompanyName = 'BestCompanyEver, s.r.o.';
$orderData->deliveryCountry = $this->getReference(CountryDataFixture::COUNTRY_SLOVAKIA_1);
$orderData->deliveryStreet = 'Křivá 11';
$orderData->deliveryTelephone = '+421555444';
$orderData->deliveryPostcode = '01305';
$orderData->deliveryFirstName = 'Pavol';
$orderData->deliveryLastName = 'Svátek';
$orderData->companyName = 'BestCompanyEver, s.r.o.';
$orderData->companyNumber = '555555';
$orderData->note = 'Doufám, že vše dorazí v pořádku a co nejdříve :)';
$orderData->domainId = Domain::FIRST_DOMAIN_ID;
$orderData->currency = $this->getReference(CurrencyDataFixture::CURRENCY_CZK);
$orderData->createdAt = $this->faker->dateTimeBetween('-1 week', 'now');
$this->createOrder(
$orderData,
[
ProductDataFixture::PRODUCT_PREFIX . '7' => 1,
ProductDataFixture::PRODUCT_PREFIX . '17' => 6,
ProductDataFixture::PRODUCT_PREFIX . '9' => 1,
ProductDataFixture::PRODUCT_PREFIX . '14' => 1,
ProductDataFixture::PRODUCT_PREFIX . '10' => 2,
]
);
$user = $this->userRepository->findUserByEmailAndDomain('[email protected]', Domain::FIRST_DOMAIN_ID);
$orderData = $this->orderDataFactory->create();
$orderData->transport = $this->getReference(TransportDataFixture::TRANSPORT_PPL);
$orderData->payment = $this->getReference(PaymentDataFixture::PAYMENT_CARD);
$orderData->status = $this->getReference(OrderStatusDataFixture::ORDER_STATUS_NEW);
$orderData->firstName = 'Radim';
$orderData->lastName = 'Svátek';
$orderData->email = '[email protected]';
$orderData->telephone = '+420733598748';
$orderData->street = 'Křivá 11';
$orderData->city = 'Jablonec';
$orderData->postcode = '78952';
$orderData->country = $this->getReference(CountryDataFixture::COUNTRY_CZECH_REPUBLIC_1);
$orderData->deliveryAddressSameAsBillingAddress = false;
$orderData->deliveryCity = 'Ostrava';
$orderData->deliveryCompanyName = 'BestCompanyEver, s.r.o.';
$orderData->deliveryCountry = $this->getReference(CountryDataFixture::COUNTRY_SLOVAKIA_1);
$orderData->deliveryStreet = 'Křivá 11';
$orderData->deliveryTelephone = '+421555444';
$orderData->deliveryPostcode = '01305';
$orderData->deliveryFirstName = 'Pavol';
$orderData->deliveryLastName = 'Svátek';
$orderData->companyName = 'BestCompanyEver, s.r.o.';
$orderData->companyNumber = '555555';
$orderData->note = 'Doufám, že vše dorazí v pořádku a co nejdříve :)';
$orderData->domainId = Domain::FIRST_DOMAIN_ID;
$orderData->currency = $this->getReference(CurrencyDataFixture::CURRENCY_CZK);
$orderData->createdAt = $this->faker->dateTimeBetween('-1 week', 'now');
$this->createOrder(
$orderData,
[
ProductDataFixture::PRODUCT_PREFIX . '7' => 1,
ProductDataFixture::PRODUCT_PREFIX . '17' => 6,
ProductDataFixture::PRODUCT_PREFIX . '9' => 1,
ProductDataFixture::PRODUCT_PREFIX . '14' => 1,
ProductDataFixture::PRODUCT_PREFIX . '10' => 2,
],
$user
);
}
/**
* @param \Shopsys\FrameworkBundle\Model\Order\OrderData $orderData
* @param array $products
* @param \Shopsys\FrameworkBundle\Model\Customer\User $user
*/
private function createOrder(
OrderData $orderData,
array $products,
User $user = null
) {
$quantifiedProducts = [];
foreach ($products as $productReferenceName => $quantity) {
$product = $this->getReference($productReferenceName);
$quantifiedProducts[] = new QuantifiedProduct($product, $quantity);
}
$orderPreview = $this->orderPreviewFactory->create(
$orderData->currency,
$orderData->domainId,
$quantifiedProducts,
$orderData->transport,
$orderData->payment,
$user,
null
);
$order = $this->orderFacade->createOrder($orderData, $orderPreview, $user);
/* @var $order \Shopsys\FrameworkBundle\Model\Order\Order */
$referenceName = self::ORDER_PREFIX . $order->getId();
$this->addReference($referenceName, $order);
}
/**
* {@inheritDoc}
*/
public function getDependencies()
{
return [
ProductDataFixture::class,
TransportDataFixture::class,
PaymentDataFixture::class,
UserDataFixture::class,
OrderStatusDataFixture::class,
CountryDataFixture::class,
];
}
}
| 1 | 12,576 | Some could be created as superadmin instead, to add more special cases. Also, the reason behind this change should be described in the commit msg and the PR description. | shopsys-shopsys | php |
@@ -28,6 +28,7 @@ from pyspark.sql import functions as F
from pyspark.sql.types import FloatType, DoubleType, LongType, StructType, TimestampType, \
to_arrow_type
+from databricks import koalas as ks
from databricks.koalas.dask.utils import derived_from
from databricks.koalas.frame import DataFrame
from databricks.koalas.generic import _Frame, max_display_count | 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A wrapper class for Spark Column to behave similar to pandas Series.
"""
from decorator import decorator, dispatch_on
from functools import partial
import numpy as np
import pandas as pd
from pyspark import sql as spark
from pyspark.sql import functions as F
from pyspark.sql.types import FloatType, DoubleType, LongType, StructType, TimestampType, \
to_arrow_type
from databricks.koalas.dask.utils import derived_from
from databricks.koalas.frame import DataFrame
from databricks.koalas.generic import _Frame, max_display_count
from databricks.koalas.metadata import Metadata
from databricks.koalas.missing.series import _MissingPandasLikeSeries
from databricks.koalas.selection import SparkDataFrameLocator
@decorator
def _column_op(f, self, *args):
"""
A decorator that wraps APIs taking/returning Spark Column so that Koalas Series can be
supported too. If this decorator is used for the `f` function that takes Spark Column and
returns Spark Column, decorated `f` takes Koalas Series as well and returns Koalas
Series.
:param f: a function that takes Spark Column and returns Spark Column.
:param self: Koalas Series
:param args: arguments that the function `f` takes.
"""
assert all((not isinstance(arg, Series)) or (arg._kdf is self._kdf) for arg in args), \
"Cannot combine column argument because it comes from a different dataframe"
# It is possible for the function `f` takes other arguments than Spark Column.
# To cover this case, explicitly check if the argument is Koalas Series and
# extract Spark Column. For other arguments, they are used as are.
args = [arg._scol if isinstance(arg, Series) else arg for arg in args]
scol = f(self._scol, *args)
return Series(scol, self._kdf, self._index_info)
@decorator
def _numpy_column_op(f, self, *args):
# PySpark does not support NumPy type out of the box. For now, we convert NumPy types
# into some primitive types understandable in PySpark.
new_args = []
for arg in args:
# TODO: This is a quick hack to support NumPy type. We should revisit this.
if isinstance(self.spark_type, LongType) and isinstance(arg, np.timedelta64):
new_args.append(float(arg / np.timedelta64(1, 's')))
else:
new_args.append(arg)
return _column_op(f)(self, *new_args)
class Series(_Frame):
"""
Koala Series that corresponds to Pandas Series logically. This holds Spark Column
internally.
:ivar _scol: Spark Column instance
:ivar _kdf: Parent's Koalas DataFrame
:ivar _index_info: Each pair holds the index field name which exists in Spark fields,
and the index name.
"""
@derived_from(pd.Series)
@dispatch_on('data')
def __init__(self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False):
s = pd.Series(data=data, index=index, dtype=dtype, name=name, copy=copy, fastpath=fastpath)
self._init_from_pandas(s)
@__init__.register(pd.Series)
def _init_from_pandas(self, s, *args):
"""
Creates Koalas Series from Pandas Series.
:param s: Pandas Series
"""
kdf = DataFrame(pd.DataFrame(s))
self._init_from_spark(kdf._sdf[kdf._metadata.column_fields[0]],
kdf, kdf._metadata.index_info)
@__init__.register(spark.Column)
def _init_from_spark(self, scol, kdf, index_info, *args):
"""
Creates Koalas Series from Spark Column.
:param scol: Spark Column
:param kdf: Koalas DataFrame that should have the `scol`.
:param index_info: index information of this Series.
"""
assert index_info is not None
self._scol = scol
self._kdf = kdf
self._index_info = index_info
# arithmetic operators
__neg__ = _column_op(spark.Column.__neg__)
__add__ = _column_op(spark.Column.__add__)
def __sub__(self, other):
# Note that timestamp subtraction casts arguments to integer. This is to mimic Pandas's
# behaviors. Pandas returns 'timedelta64[ns]' from 'datetime64[ns]'s subtraction.
if isinstance(other, Series) and isinstance(self.spark_type, TimestampType):
if not isinstance(other.spark_type, TimestampType):
raise TypeError('datetime subtraction can only be applied to datetime series.')
return self.astype('bigint') - other.astype('bigint')
else:
return _column_op(spark.Column.__sub__)(self, other)
__mul__ = _column_op(spark.Column.__mul__)
__div__ = _numpy_column_op(spark.Column.__div__)
__truediv__ = _numpy_column_op(spark.Column.__truediv__)
__mod__ = _column_op(spark.Column.__mod__)
__radd__ = _column_op(spark.Column.__radd__)
__rsub__ = _column_op(spark.Column.__rsub__)
__rmul__ = _column_op(spark.Column.__rmul__)
__rdiv__ = _numpy_column_op(spark.Column.__rdiv__)
__rtruediv__ = _numpy_column_op(spark.Column.__rtruediv__)
__rmod__ = _column_op(spark.Column.__rmod__)
__pow__ = _column_op(spark.Column.__pow__)
__rpow__ = _column_op(spark.Column.__rpow__)
# logistic operators
__eq__ = _column_op(spark.Column.__eq__)
__ne__ = _column_op(spark.Column.__ne__)
__lt__ = _column_op(spark.Column.__lt__)
__le__ = _column_op(spark.Column.__le__)
__ge__ = _column_op(spark.Column.__ge__)
__gt__ = _column_op(spark.Column.__gt__)
# `and`, `or`, `not` cannot be overloaded in Python,
# so use bitwise operators as boolean operators
__and__ = _column_op(spark.Column.__and__)
__or__ = _column_op(spark.Column.__or__)
__invert__ = _column_op(spark.Column.__invert__)
__rand__ = _column_op(spark.Column.__rand__)
__ror__ = _column_op(spark.Column.__ror__)
@property
def dtype(self):
"""Return the dtype object of the underlying data.
Examples
--------
>>> s = ks.Series([1, 2, 3])
>>> s.dtype
dtype('int64')
>>> s = ks.Series(list('abc'))
>>> s.dtype
dtype('O')
>>> s = ks.Series(pd.date_range('20130101', periods=3))
>>> s.dtype
dtype('<M8[ns]')
"""
if type(self.spark_type) == TimestampType:
return np.dtype('datetime64[ns]')
else:
return np.dtype(to_arrow_type(self.spark_type).to_pandas_dtype())
@property
def spark_type(self):
""" Returns the data type as defined by Spark, as a Spark DataType object."""
return self.schema.fields[-1].dataType
def astype(self, dtype):
from databricks.koalas.typing import as_spark_type
spark_type = as_spark_type(dtype)
if not spark_type:
raise ValueError("Type {} not understood".format(dtype))
return Series(self._scol.cast(spark_type), self._kdf, self._index_info)
def getField(self, name):
if not isinstance(self.schema, StructType):
raise AttributeError("Not a struct: {}".format(self.schema))
else:
fnames = self.schema.fieldNames()
if name not in fnames:
raise AttributeError(
"Field {} not found, possible values are {}".format(name, ", ".join(fnames)))
return Series(self._scol.getField(name), self._kdf, self._index_info)
# TODO: automate the process here
def alias(self, name):
return self.rename(name)
@property
def schema(self):
return self.to_dataframe()._sdf.schema
@property
def shape(self):
"""Return a tuple of the shape of the underlying data."""
return len(self),
@property
def name(self):
return self._metadata.column_fields[0]
@name.setter
def name(self, name):
self.rename(name, inplace=True)
@derived_from(pd.Series)
def rename(self, index=None, **kwargs):
if index is None:
return self
scol = self._scol.alias(index)
if kwargs.get('inplace', False):
self._scol = scol
return self
else:
return Series(scol, self._kdf, self._index_info)
@property
def _metadata(self):
return self.to_dataframe()._metadata
@property
def index(self):
"""The index (axis labels) Column of the Series.
Currently supported only when the DataFrame has a single index.
"""
if len(self._metadata.index_info) != 1:
raise KeyError('Currently supported only when the Column has a single index.')
return self._kdf.index
@derived_from(pd.Series)
def reset_index(self, level=None, drop=False, name=None, inplace=False):
if inplace and not drop:
raise TypeError('Cannot reset_index inplace on a Series to create a DataFrame')
if name is not None:
kdf = self.rename(name).to_dataframe()
else:
kdf = self.to_dataframe()
kdf = kdf.reset_index(level=level, drop=drop)
if drop:
s = _col(kdf)
if inplace:
self._kdf = kdf
self._scol = s._scol
self._index_info = s._index_info
else:
return s
else:
return kdf
@property
def loc(self):
return SparkDataFrameLocator(self)
def to_dataframe(self):
sdf = self._kdf._sdf.select([field for field, _ in self._index_info] + [self._scol])
metadata = Metadata(column_fields=[sdf.schema[-1].name], index_info=self._index_info)
return DataFrame(sdf, metadata)
def toPandas(self):
return _col(self.to_dataframe().toPandas())
@derived_from(pd.Series)
def isnull(self):
if isinstance(self.schema[self.name].dataType, (FloatType, DoubleType)):
return Series(self._scol.isNull() | F.isnan(self._scol), self._kdf, self._index_info)
else:
return Series(self._scol.isNull(), self._kdf, self._index_info)
isna = isnull
@derived_from(pd.Series)
def notnull(self):
return ~self.isnull()
notna = notnull
@derived_from(pd.Series)
def dropna(self, axis=0, inplace=False, **kwargs):
ks = _col(self.to_dataframe().dropna(axis=axis, inplace=False))
if inplace:
self._kdf = ks._kdf
self._scol = ks._scol
else:
return ks
@derived_from(DataFrame)
def head(self, n=5):
return _col(self.to_dataframe().head(n))
def unique(self):
# Pandas wants a series/array-like object
return _col(self.to_dataframe().unique())
@derived_from(pd.Series)
def value_counts(self, normalize=False, sort=True, ascending=False, bins=None, dropna=True):
if bins is not None:
raise NotImplementedError("value_counts currently does not support bins")
if dropna:
sdf_dropna = self._kdf._sdf.filter(self.notna()._scol)
else:
sdf_dropna = self._kdf._sdf
sdf = sdf_dropna.groupby(self._scol).count()
if sort:
if ascending:
sdf = sdf.orderBy(F.col('count'))
else:
sdf = sdf.orderBy(F.col('count').desc())
if normalize:
sum = sdf_dropna.count()
sdf = sdf.withColumn('count', F.col('count') / F.lit(sum))
index_name = 'index' if self.name != 'index' else 'level_0'
kdf = DataFrame(sdf)
kdf.columns = [index_name, self.name]
kdf._metadata = Metadata(column_fields=[self.name], index_info=[(index_name, None)])
return _col(kdf)
def corr(self, other, method='pearson'):
"""
Compute correlation with `other` Series, excluding missing values.
Parameters
----------
other : Series
method : {'pearson', 'spearman'}
* pearson : standard correlation coefficient
* spearman : Spearman rank correlation
Returns
-------
correlation : float
Examples
--------
>>> df = ks.DataFrame({'s1': [.2, .0, .6, .2],
... 's2': [.3, .6, .0, .1]})
>>> s1 = df.s1
>>> s2 = df.s2
>>> s1.corr(s2, method='pearson')
-0.8510644963469898
>>> s1.corr(s2, method='spearman')
-0.9486832980505125
Notes
-----
There are behavior differences between Koalas and pandas.
* the `method` argument only accepts 'pearson', 'spearman'
* the data should not contain NaNs. Koalas will return an error.
* Koalas doesn't support the following argument(s).
* `min_periods` argument is not supported
"""
# This implementation is suboptimal because it computes more than necessary,
# but it should be a start
df = self._kdf.assign(corr_arg1=self, corr_arg2=other)[["corr_arg1", "corr_arg2"]]
c = df.corr(method=method)
return c.loc["corr_arg1", "corr_arg2"]
def count(self):
"""
Return number of non-NA/null observations in the Series.
Returns
-------
nobs : int
"""
return self._reduce_for_stat_function(F.count)
def _reduce_for_stat_function(self, sfun):
return _unpack_scalar(self._kdf._sdf.select(sfun(self._scol)))
def __len__(self):
return len(self.to_dataframe())
def __getitem__(self, key):
return Series(self._scol.__getitem__(key), self._kdf, self._index_info)
def __getattr__(self, item):
if item.startswith("__") or item.startswith("_pandas_") or item.startswith("_spark_"):
raise AttributeError(item)
if hasattr(_MissingPandasLikeSeries, item):
return partial(getattr(_MissingPandasLikeSeries, item), self)
return self.getField(item)
def __str__(self):
return self._pandas_orig_repr()
def __repr__(self):
return repr(self.head(max_display_count).toPandas())
def __dir__(self):
if not isinstance(self.schema, StructType):
fields = []
else:
fields = [f for f in self.schema.fieldNames() if ' ' not in f]
return super(Series, self).__dir__() + fields
def _pandas_orig_repr(self):
# TODO: figure out how to reuse the original one.
return 'Column<%s>' % self._scol._jc.toString().encode('utf8')
def _unpack_scalar(sdf):
"""
Takes a dataframe that is supposed to contain a single row with a single scalar value,
and returns this value.
"""
l = sdf.head(2)
assert len(l) == 1, (sdf, l)
row = l[0]
l2 = list(row.asDict().values())
assert len(l2) == 1, (row, l2)
return l2[0]
def _col(df):
assert isinstance(df, (DataFrame, pd.DataFrame)), type(df)
return df[df.columns[0]]
| 1 | 8,545 | Is this required? | databricks-koalas | py |
@@ -483,9 +483,10 @@ func start(c *cli.Context) error {
Cluster: cm,
StoragePolicy: sp,
Security: &sdk.SecurityConfig{
- Role: rm,
- Tls: tlsConfig,
- Authenticators: authenticators,
+ Role: rm,
+ Tls: tlsConfig,
+ Authenticators: authenticators,
+ PublicVolumeCreationDisabled: !c.Bool("public-volume-create-allowed"),
},
})
if err != nil { | 1 | //go:generate swagger generate spec -m -o ../../api/swagger/swagger.json
// Package classification OSD API.
//
// OpenStorage is a clustered implementation of the Open Storage specification and relies on the OCI runtime.
// It allows you to run stateful services in containers in a multi-host clustered environment.
// This document represents the API documentaton of Openstorage, for the GO client please visit:
// https://github.com/libopenstorage/openstorage
//
// Schemes: http, https
// Host: localhost
// BasePath: /v1
// Version: 2.0.0
// License: APACHE2 https://opensource.org/licenses/Apache-2.0
// Contact: https://github.com/libopenstorage/openstorage
//
// Consumes:
// - application/json
//
// Produces:
// - application/json
//
// swagger:meta
package main
import (
"fmt"
"io/ioutil"
"net/url"
"os"
"runtime"
"strconv"
"strings"
"github.com/codegangsta/cli"
"github.com/docker/docker/pkg/reexec"
"github.com/libopenstorage/openstorage/api"
"github.com/libopenstorage/openstorage/api/flexvolume"
"github.com/libopenstorage/openstorage/api/server"
"github.com/libopenstorage/openstorage/api/server/sdk"
osdcli "github.com/libopenstorage/openstorage/cli"
"github.com/libopenstorage/openstorage/cluster"
clustermanager "github.com/libopenstorage/openstorage/cluster/manager"
"github.com/libopenstorage/openstorage/config"
"github.com/libopenstorage/openstorage/csi"
graphdrivers "github.com/libopenstorage/openstorage/graph/drivers"
"github.com/libopenstorage/openstorage/objectstore"
"github.com/libopenstorage/openstorage/pkg/auth"
"github.com/libopenstorage/openstorage/pkg/auth/systemtoken"
"github.com/libopenstorage/openstorage/pkg/role"
policy "github.com/libopenstorage/openstorage/pkg/storagepolicy"
"github.com/libopenstorage/openstorage/schedpolicy"
"github.com/libopenstorage/openstorage/volume"
volumedrivers "github.com/libopenstorage/openstorage/volume/drivers"
"github.com/portworx/kvdb"
"github.com/portworx/kvdb/consul"
etcd "github.com/portworx/kvdb/etcd/v2"
"github.com/portworx/kvdb/mem"
"github.com/sirupsen/logrus"
)
var (
datastores = []string{mem.Name, etcd.Name, consul.Name}
)
func main() {
if reexec.Init() {
return
}
app := cli.NewApp()
app.Name = "osd"
app.Usage = "Open Storage CLI"
app.Version = config.Version
app.Flags = []cli.Flag{
cli.BoolFlag{
Name: "json,j",
Usage: "output in json",
},
cli.BoolFlag{
Name: osdcli.DaemonAlias,
Usage: "Start OSD in daemon mode",
},
cli.StringSliceFlag{
Name: "driver",
Usage: "driver name and options: name=btrfs,home=/var/openstorage/btrfs",
Value: new(cli.StringSlice),
},
cli.StringFlag{
Name: "kvdb,k",
Usage: "uri to kvdb e.g. kv-mem://localhost, etcd-kv://localhost:4001, consul-kv://localhost:8500",
Value: "kv-mem://localhost",
},
cli.StringFlag{
Name: "file,f",
Usage: "file to read the OSD configuration from.",
Value: "",
},
cli.StringFlag{
Name: "mgmtport,m",
Usage: "Management Port for REST server. Example: 9001",
Value: "9001",
},
cli.StringFlag{
Name: "sdkport",
Usage: "gRPC port for SDK. Example: 9100",
Value: "9100",
},
cli.StringFlag{
Name: "sdkrestport",
Usage: "gRPC REST Gateway port for SDK. Example: 9110",
Value: "9110",
},
cli.StringFlag{
Name: "nodeid",
Usage: "Name of this node",
Value: "1",
},
cli.StringFlag{
Name: "clusterid",
Usage: "Cluster id",
Value: "openstorage.cluster",
},
cli.StringFlag{
Name: "tls-cert-file",
Usage: "TLS Cert file path",
},
cli.StringFlag{
Name: "tls-key-file",
Usage: "TLS Key file path",
},
cli.StringFlag{
Name: "username-claim",
Usage: "Claim key from the token to use as the unique id of the ." +
"user. Values can be only 'sub', 'email', or 'name'",
Value: "sub",
},
cli.StringFlag{
Name: "oidc-issuer",
Usage: "OIDC Issuer,e.g. https://accounts.google.com",
},
cli.StringFlag{
Name: "oidc-client-id",
Usage: "OIDC Client ID provided by issuer",
},
cli.StringFlag{
Name: "oidc-custom-claim-namespace",
Usage: "OIDC namespace for custom claims if needed",
},
cli.BoolFlag{
Name: "oidc-skip-client-id-check",
Usage: "OIDC skip verification of client id in the token",
},
cli.StringFlag{
Name: "jwt-issuer",
Usage: "JSON Web Token issuer",
Value: "openstorage.io",
},
cli.StringFlag{
Name: "jwt-shared-secret",
Usage: "JSON Web Token shared secret",
},
cli.StringFlag{
Name: "jwt-rsa-pubkey-file",
Usage: "JSON Web Token RSA Public file path",
},
cli.StringFlag{
Name: "jwt-ecds-pubkey-file",
Usage: "JSON Web Token ECDS Public file path",
},
cli.StringFlag{
Name: "jwt-system-shared-secret",
Usage: "JSON Web Token system shared secret used by clusters to create tokens for internal cluster communication",
Value: "non-secure-secret",
},
cli.StringFlag{
Name: "clusterdomain",
Usage: "Cluster Domain Name",
Value: "",
},
}
app.Action = wrapAction(start)
app.Commands = []cli.Command{
{
Name: "driver",
Aliases: []string{"d"},
Usage: "Manage drivers",
Subcommands: osdcli.DriverCommands(),
},
{
Name: "cluster",
Aliases: []string{"c"},
Usage: "Manage cluster",
Subcommands: osdcli.ClusterCommands(),
},
{
Name: "version",
Aliases: []string{"v"},
Usage: "Display version",
Action: wrapAction(showVersion),
},
}
// Register all volume drivers with the CLI.
for _, v := range volumedrivers.AllDrivers {
// TODO(pedge): was an and, but we have no drivers that have two types
switch v.DriverType {
case api.DriverType_DRIVER_TYPE_BLOCK:
bCmds := osdcli.BlockVolumeCommands(v.Name)
cmds := append(bCmds)
c := cli.Command{
Name: v.Name,
Usage: fmt.Sprintf("Manage %s storage", v.Name),
Subcommands: cmds,
}
app.Commands = append(app.Commands, c)
case api.DriverType_DRIVER_TYPE_FILE:
fCmds := osdcli.FileVolumeCommands(v.Name)
cmds := append(fCmds)
c := cli.Command{
Name: v.Name,
Usage: fmt.Sprintf("Manage %s volumes", v.Name),
Subcommands: cmds,
}
app.Commands = append(app.Commands, c)
}
}
// Register all graph drivers with the CLI.
for _, v := range graphdrivers.AllDrivers {
// TODO(pedge): was an and, but we have no drivers that have two types
switch v.DriverType {
case api.DriverType_DRIVER_TYPE_GRAPH:
cmds := osdcli.GraphDriverCommands(v.Name)
c := cli.Command{
Name: v.Name,
Usage: fmt.Sprintf("Manage %s graph storage", v.Name),
Subcommands: cmds,
}
app.Commands = append(app.Commands, c)
}
}
app.Run(os.Args)
}
func start(c *cli.Context) error {
if !osdcli.DaemonMode(c) {
cli.ShowAppHelp(c)
return nil
}
var (
cfg *config.Config
)
// We are in daemon mode.
file := c.String("file")
if len(file) != 0 {
// Read from file
var err error
cfg, err = config.Parse(file)
if err != nil {
return err
}
} else {
cfg = &config.Config{}
}
// Check if values are set
if len(cfg.Osd.ClusterConfig.ClusterId) == 0 {
cfg.Osd.ClusterConfig.ClusterId = c.String("clusterid")
}
if len(cfg.Osd.ClusterConfig.NodeId) == 0 {
cfg.Osd.ClusterConfig.NodeId = c.String("nodeid")
}
// Get driver information
driverInfoList := c.StringSlice("driver")
if len(driverInfoList) != 0 {
if cfg.Osd.Drivers == nil {
cfg.Osd.Drivers = make(map[string]map[string]string)
}
params := make(map[string]string)
var name string
// many driver infos provided as a []string
for _, driverInfo := range driverInfoList {
// driverInfo of the format name=xxx,opt1=val1,opt2=val2
for _, pair := range strings.Split(driverInfo, ",") {
kv := strings.Split(pair, "=")
if len(kv) != 2 {
return fmt.Errorf("driver option has a an invalid pair %s", kv)
}
k := kv[0]
v := kv[1]
if len(k) == 0 || len(v) == 0 {
return fmt.Errorf("driver option '%s' is invalid", pair)
}
if k == "name" {
// Driver name
name = v
} else {
// Options for driver
params[k] = v
}
}
if len(name) == 0 {
return fmt.Errorf("driver option is missing driver name")
}
cfg.Osd.Drivers[name] = params
}
}
if len(cfg.Osd.Drivers) == 0 {
return fmt.Errorf("Must supply driver information")
}
kvdbURL := c.String("kvdb")
u, err := url.Parse(kvdbURL)
scheme := u.Scheme
u.Scheme = "http"
kv, err := kvdb.New(scheme, "openstorage", []string{u.String()}, nil, kvdb.LogFatalErrorCB)
if err != nil {
return fmt.Errorf("Failed to initialize KVDB: %v (%v)\nSupported datastores: %v", scheme, err, datastores)
}
if err := kvdb.SetInstance(kv); err != nil {
return fmt.Errorf("Failed to initialize KVDB: %v", err)
}
// Start the cluster state machine, if enabled.
clusterInit := false
if cfg.Osd.ClusterConfig.NodeId != "" && cfg.Osd.ClusterConfig.ClusterId != "" {
logrus.Infof("OSD enabling cluster mode.")
if err := clustermanager.Init(cfg.Osd.ClusterConfig); err != nil {
return fmt.Errorf("Unable to init cluster server: %v", err)
}
if err := server.StartClusterAPI(cluster.APIBase, 0); err != nil {
return fmt.Errorf("Unable to start cluster API server: %v", err)
}
clusterInit = true
}
isDefaultSet := false
// Start the volume drivers.
for d, v := range cfg.Osd.Drivers {
logrus.Infof("Starting volume driver: %v", d)
if err := volumedrivers.Register(d, v); err != nil {
return fmt.Errorf("Unable to start volume driver: %v, %v", d, err)
}
var mgmtPort, pluginPort uint64
if port, ok := v[config.MgmtPortKey]; ok {
mgmtPort, err = strconv.ParseUint(port, 10, 16)
if err != nil {
return fmt.Errorf("Invalid OSD Config File. Invalid Mgmt Port number for Driver : %s", d)
}
} else if c.String("mgmtport") != "" {
mgmtPort, err = strconv.ParseUint(c.String("mgmtport"), 10, 16)
if err != nil {
return fmt.Errorf("Invalid Mgmt Port number for Driver : %s", d)
}
} else {
mgmtPort = 0
}
if port, ok := v[config.PluginPortKey]; ok {
pluginPort, err = strconv.ParseUint(port, 10, 16)
if err != nil {
return fmt.Errorf("Invalid OSD Config File. Invalid Plugin Port number for Driver : %s", d)
}
} else {
pluginPort = 0
}
sdksocket := fmt.Sprintf("/var/lib/osd/driver/%s-sdk.sock", d)
if err := server.StartVolumePluginAPI(
d, sdksocket,
volume.PluginAPIBase,
uint16(pluginPort),
); err != nil {
return fmt.Errorf("Unable to start plugin api server: %v", err)
}
if _, _, err := server.StartVolumeMgmtAPI(
d, sdksocket,
volume.DriverAPIBase,
uint16(mgmtPort),
false,
); err != nil {
return fmt.Errorf("Unable to start volume mgmt api server: %v", err)
}
if d != "" && cfg.Osd.ClusterConfig.DefaultDriver == d {
isDefaultSet = true
}
// Start CSI Server for this driver
csisock := os.Getenv("CSI_ENDPOINT")
if len(csisock) == 0 {
csisock = fmt.Sprintf("/var/lib/osd/driver/%s-csi.sock", d)
}
os.Remove(csisock)
cm, err := clustermanager.Inst()
if err != nil {
return fmt.Errorf("Unable to find cluster instance: %v", err)
}
csiServer, err := csi.NewOsdCsiServer(&csi.OsdCsiServerConfig{
Net: "unix",
Address: csisock,
DriverName: d,
Cluster: cm,
SdkUds: sdksocket,
})
if err != nil {
return fmt.Errorf("Failed to start CSI server for driver %s: %v", d, err)
}
csiServer.Start()
// Create a role manager
rm, err := role.NewSdkRoleManager(kv)
if err != nil {
return fmt.Errorf("Failed to create a role manager")
}
// Get authenticators
authenticators := make(map[string]auth.Authenticator)
selfSigned, err := selfSignedAuth(c)
if err != nil {
logrus.Fatalf("Failed to create self signed config: %v", err)
} else if selfSigned != nil {
authenticators[c.String("jwt-issuer")] = selfSigned
}
oidcAuth, err := oidcAuth(c)
if err != nil {
logrus.Fatalf("Failed to create self signed config: %v", err)
} else if oidcAuth != nil {
authenticators[c.String("oidc-issuer")] = oidcAuth
}
tlsConfig, err := setupSdkTls(c)
if err != nil {
logrus.Fatalf("Failed to access TLS file information: %v", err)
}
// Auth is enabled, setup system token manager for inter-cluster communication
if len(authenticators) > 0 {
if c.String("jwt-system-shared-secret") == "" {
return fmt.Errorf("Must provide a jwt-system-shared-secret if auth with oidc or shared-secret is enabled")
}
if len(cfg.Osd.ClusterConfig.SystemSharedSecret) == 0 {
cfg.Osd.ClusterConfig.SystemSharedSecret = c.String("jwt-system-shared-secret")
}
// Initialize system token manager if an authenticator is setup
stm, err := systemtoken.NewManager(&systemtoken.Config{
ClusterId: cfg.Osd.ClusterConfig.ClusterId,
NodeId: cfg.Osd.ClusterConfig.NodeId,
SharedSecret: cfg.Osd.ClusterConfig.SystemSharedSecret,
})
if err != nil {
return fmt.Errorf("Failed to create system token manager: %v\n", err)
}
auth.InitSystemTokenManager(stm)
}
sp, err := policy.Init()
if err != nil {
return fmt.Errorf("Unable to Initialise Storage Policy Manager Instances %v", err)
}
// Start SDK Server for this driver
os.Remove(sdksocket)
sdkServer, err := sdk.New(&sdk.ServerConfig{
Net: "tcp",
Address: ":" + c.String("sdkport"),
RestPort: c.String("sdkrestport"),
Socket: sdksocket,
DriverName: d,
Cluster: cm,
StoragePolicy: sp,
Security: &sdk.SecurityConfig{
Role: rm,
Tls: tlsConfig,
Authenticators: authenticators,
},
})
if err != nil {
return fmt.Errorf("Failed to start SDK server for driver %s: %v", d, err)
}
sdkServer.Start()
}
if cfg.Osd.ClusterConfig.DefaultDriver != "" && !isDefaultSet {
return fmt.Errorf("Invalid OSD config file: Default Driver specified but driver not initialized")
}
if err := flexvolume.StartFlexVolumeAPI(config.FlexVolumePort, cfg.Osd.ClusterConfig.DefaultDriver); err != nil {
return fmt.Errorf("Unable to start flexvolume API: %v", err)
}
// Start the graph drivers.
for d := range cfg.Osd.GraphDrivers {
logrus.Infof("Starting graph driver: %v", d)
if err := server.StartGraphAPI(d, volume.PluginAPIBase); err != nil {
return fmt.Errorf("Unable to start graph plugin: %v", err)
}
}
if clusterInit {
cm, err := clustermanager.Inst()
if err != nil {
return fmt.Errorf("Unable to find cluster instance: %v", err)
}
if err := cm.StartWithConfiguration(
false,
"9002",
[]string{},
c.String("clusterdomain"),
&cluster.ClusterServerConfiguration{
ConfigSchedManager: schedpolicy.NewFakeScheduler(),
ConfigObjectStoreManager: objectstore.NewfakeObjectstore(),
ConfigSystemTokenManager: auth.SystemTokenManagerInst(),
},
); err != nil {
return fmt.Errorf("Unable to start cluster manager: %v", err)
}
}
// Daemon does not exit.
select {}
}
func showVersion(c *cli.Context) error {
fmt.Println("OSD Version:", config.Version)
fmt.Println("Go Version:", runtime.Version())
fmt.Println("OS:", runtime.GOOS)
fmt.Println("Arch:", runtime.GOARCH)
return nil
}
func wrapAction(f func(*cli.Context) error) func(*cli.Context) {
return func(c *cli.Context) {
if err := f(c); err != nil {
logrus.Warnln(err.Error())
os.Exit(1)
}
}
}
func selfSignedAuth(c *cli.Context) (*auth.JwtAuthenticator, error) {
var err error
rsaFile := getConfigVar(os.Getenv("OPENSTORAGE_AUTH_RSA_PUBKEY"),
c.String("jwt-rsa-pubkey-file"))
ecdsFile := getConfigVar(os.Getenv("OPENSTORAGE_AUTH_ECDS_PUBKEY"),
c.String("jwt-ecds-pubkey-file"))
sharedsecret := getConfigVar(os.Getenv("OPENSTORAGE_AUTH_SHAREDSECRET"),
c.String("jwt-shared-secret"))
if len(rsaFile) == 0 &&
len(ecdsFile) == 0 &&
len(sharedsecret) == 0 {
return nil, nil
}
authConfig := &auth.JwtAuthConfig{
SharedSecret: []byte(sharedsecret),
UsernameClaim: auth.UsernameClaimType(c.String("username-claim")),
}
// Read RSA file
if len(rsaFile) != 0 {
authConfig.RsaPublicPem, err = ioutil.ReadFile(rsaFile)
if err != nil {
logrus.Errorf("Failed to read %s", rsaFile)
}
}
// Read Ecds file
if len(ecdsFile) != 0 {
authConfig.ECDSPublicPem, err = ioutil.ReadFile(ecdsFile)
if err != nil {
logrus.Errorf("Failed to read %s", ecdsFile)
}
}
return auth.NewJwtAuth(authConfig)
}
func oidcAuth(c *cli.Context) (*auth.OIDCAuthenticator, error) {
if len(c.String("oidc-issuer")) == 0 ||
len(c.String("oidc-client-id")) == 0 {
return nil, nil
}
return auth.NewOIDC(&auth.OIDCAuthConfig{
Issuer: c.String("oidc-issuer"),
ClientID: c.String("oidc-client-id"),
Namespace: c.String("oidc-custom-claim-namespace"),
SkipClientIDCheck: c.Bool("oidc-skip-client-id-check"),
UsernameClaim: auth.UsernameClaimType(c.String("username-claim")),
})
}
func setupSdkTls(c *cli.Context) (*sdk.TLSConfig, error) {
certFile := getConfigVar(os.Getenv("OPENSTORAGE_CERTFILE"),
c.String("tls-cert-file"))
keyFile := getConfigVar(os.Getenv("OPENSTORAGE_KEYFILE"),
c.String("tls-key-file"))
if len(certFile) != 0 && len(keyFile) != 0 {
logrus.Infof("TLS %s and %s", certFile, keyFile)
return &sdk.TLSConfig{
CertFile: certFile,
KeyFile: keyFile,
}, nil
}
return nil, nil
}
func getConfigVar(envVar, cliVar string) string {
if len(envVar) != 0 {
return envVar
}
return cliVar
}
| 1 | 8,380 | `--public-volume-create-allowed=true` and set the default to `true`, then use `!c.Bool('...')` | libopenstorage-openstorage | go |
@@ -867,7 +867,9 @@ static int handle_goaway_frame(h2o_http2_conn_t *conn, h2o_http2_frame_t *frame,
if ((ret = h2o_http2_decode_goaway_payload(&payload, frame, err_desc)) != 0)
return ret;
- /* nothing to do, since we do not open new streams by ourselves */
+ /* stop opening new push streams hereafter */
+ conn->push_stream_ids.max_open = INT32_MAX;
+
return 0;
}
| 1 | /*
* Copyright (c) 2014-2016 DeNA Co., Ltd., Kazuho Oku, Fastly, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include "h2o.h"
#include "h2o/http1.h"
#include "h2o/http2.h"
#include "h2o/http2_internal.h"
/* the 24-octet client connection preface that every HTTP/2 connection must begin with (checked in expect_preface) */
static const h2o_iovec_t CONNECTION_PREFACE = {H2O_STRLIT("PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n")};
/* priority applied to streams for which the peer supplied no (valid) priority information */
const h2o_http2_priority_t h2o_http2_default_priority = {
    0, /* exclusive */
    0, /* dependency */
    16 /* weight */
};
/* the SETTINGS values this server advertises to its peers */
const h2o_http2_settings_t H2O_HTTP2_SETTINGS_HOST = {
    4096, /* header_table_size */
    0, /* enable_push (clients are never allowed to initiate server push; RFC 7540 Section 8.2) */
    100, /* max_concurrent_streams */
    65535, /* initial_window_size */
    16384 /* max_frame_size */
};
/* pre-serialized first write: a SETTINGS frame followed by a connection-level WINDOW_UPDATE
 * (must stay in sync with H2O_HTTP2_SETTINGS_HOST above) */
static const h2o_iovec_t SERVER_PREFACE = {H2O_STRLIT("\x00\x00\x06" /* frame size */
                                                      "\x04" /* settings frame */
                                                      "\x00" /* no flags */
                                                      "\x00\x00\x00\x00" /* stream id */
                                                      "\x00\x03"
                                                      "\x00\x00\x00\x64" /* max_concurrent_streams = 100 */
                                                      "\x00\x00\x04" /* frame size */
                                                      "\x08" /* window_update */
                                                      "\x00" /* no flags */
                                                      "\x00\x00\x00\x00" /* stream id */
                                                      "\x00\xff\x00\x01" /* 16777216 - 65535 */
                                                      )};
/* per-thread prototype for the connection write buffer */
static __thread h2o_buffer_prototype_t wbuf_buffer_prototype = {{16}, {H2O_HTTP2_DEFAULT_OUTBUF_SIZE}};
/* forward declarations (definitions below) */
static void initiate_graceful_shutdown(h2o_context_t *ctx);
static int close_connection(h2o_http2_conn_t *conn);
static ssize_t expect_default(h2o_http2_conn_t *conn, const uint8_t *src, size_t len, const char **err_desc);
static void do_emit_writereq(h2o_http2_conn_t *conn);
static void on_read(h2o_socket_t *sock, const char *err);
static void push_path(h2o_req_t *src_req, const char *abspath, size_t abspath_len, int is_critical);
static int foreach_request(h2o_context_t *ctx, int (*cb)(h2o_req_t *req, void *cbdata), void *cbdata);
static void stream_send_error(h2o_http2_conn_t *conn, uint32_t stream_id, int errnum);
/* protocol-level entry points exported to the core */
const h2o_protocol_callbacks_t H2O_HTTP2_CALLBACKS = {initiate_graceful_shutdown, foreach_request};
/* Returns non-zero iff `stream_id` refers to a stream still in the "idle"
 * state, i.e. its id is greater than the highest id opened so far for its
 * kind (push vs. pull). */
static int is_idle_stream_id(h2o_http2_conn_t *conn, uint32_t stream_id)
{
    uint32_t max_open;

    if (h2o_http2_stream_is_push(stream_id)) {
        max_open = conn->push_stream_ids.max_open;
    } else {
        max_open = conn->pull_stream_ids.max_open;
    }
    return stream_id > max_open;
}
/* Appends a GOAWAY frame to the write buffer (last-stream-id set to the
 * highest pull stream opened so far) and moves the connection into the
 * half-closed state; a no-op if the connection is already closing. */
static void enqueue_goaway(h2o_http2_conn_t *conn, int errnum, h2o_iovec_t additional_data)
{
    if (conn->state < H2O_HTTP2_CONN_STATE_IS_CLOSING) {
        /* http2 spec allows sending GOAWAY more than once (for one reason since errors may arise after sending the first one) */
        h2o_http2_encode_goaway_frame(&conn->_write.buf, conn->pull_stream_ids.max_open, errnum, additional_data);
        h2o_http2_conn_request_write(conn);
        conn->state = H2O_HTTP2_CONN_STATE_HALF_CLOSED;
    }
}
/* Timeout callback of the final graceful-shutdown phase: two GOAWAY frames
 * have already been sent, so forcibly close every connection that remains. */
static void graceful_shutdown_close_stragglers(h2o_timeout_entry_t *entry)
{
    h2o_context_t *ctx = H2O_STRUCT_FROM_MEMBER(h2o_context_t, http2._graceful_shutdown_timeout, entry);
    h2o_linklist_t *cur = ctx->http2._conns.next;

    while (cur != &ctx->http2._conns) {
        h2o_http2_conn_t *conn = H2O_STRUCT_FROM_MEMBER(h2o_http2_conn_t, _conns, cur);
        /* advance before closing; close_connection may unlink (and free) the current node */
        cur = cur->next;
        close_connection(conn);
    }
}
/* Timeout callback of the second graceful-shutdown phase: re-send GOAWAY (now
 * with an accurate last-stream-id, via enqueue_goaway) to connections that
 * have not yet reached the half-closed state, and, if a straggler timeout is
 * configured, schedule the final forced-close phase. */
static void graceful_shutdown_resend_goaway(h2o_timeout_entry_t *entry)
{
    h2o_context_t *ctx = H2O_STRUCT_FROM_MEMBER(h2o_context_t, http2._graceful_shutdown_timeout, entry);
    h2o_linklist_t *node;
    int do_close_stragglers = 0;
    for (node = ctx->http2._conns.next; node != &ctx->http2._conns; node = node->next) {
        h2o_http2_conn_t *conn = H2O_STRUCT_FROM_MEMBER(h2o_http2_conn_t, _conns, node);
        if (conn->state < H2O_HTTP2_CONN_STATE_HALF_CLOSED) {
            enqueue_goaway(conn, H2O_HTTP2_ERROR_NONE, (h2o_iovec_t){NULL});
            do_close_stragglers = 1;
        }
    }
    /* After waiting a second, we still had active connections. If configured, wait one
     * final timeout before closing the connections */
    if (do_close_stragglers && ctx->globalconf->http2.graceful_shutdown_timeout) {
        ctx->http2._graceful_shutdown_timeout.cb = graceful_shutdown_close_stragglers;
        h2o_timeout_link(ctx->loop, &ctx->http2.graceful_shutdown_timeout, &ctx->http2._graceful_shutdown_timeout);
    }
}
/* Starts a graceful shutdown of all HTTP/2 connections of the context:
 * broadcasts an initial GOAWAY with last-stream-id = 2^31-1, then (after one
 * second) lets graceful_shutdown_resend_goaway send the definitive GOAWAY. */
static void initiate_graceful_shutdown(h2o_context_t *ctx)
{
    /* draft-16 6.8
     * A server that is attempting to gracefully shut down a connection SHOULD send an initial GOAWAY frame with the last stream
     * identifier set to 231-1 and a NO_ERROR code. This signals to the client that a shutdown is imminent and that no further
     * requests can be initiated. After waiting at least one round trip time, the server can send another GOAWAY frame with an
     * updated last stream identifier. This ensures that a connection can be cleanly shut down without losing requests.
     */
    h2o_linklist_t *node;

    /* only doit once */
    if (ctx->http2._graceful_shutdown_timeout.cb != NULL)
        return;
    ctx->http2._graceful_shutdown_timeout.cb = graceful_shutdown_resend_goaway;

    for (node = ctx->http2._conns.next; node != &ctx->http2._conns; node = node->next) {
        h2o_http2_conn_t *conn = H2O_STRUCT_FROM_MEMBER(h2o_http2_conn_t, _conns, node);
        if (conn->state < H2O_HTTP2_CONN_STATE_HALF_CLOSED) {
            h2o_http2_encode_goaway_frame(&conn->_write.buf, INT32_MAX, H2O_HTTP2_ERROR_NONE,
                                          (h2o_iovec_t){H2O_STRLIT("graceful shutdown")});
            h2o_http2_conn_request_write(conn);
        }
    }
    h2o_timeout_link(ctx->loop, &ctx->one_sec_timeout, &ctx->http2._graceful_shutdown_timeout);
}
/* Idle-timeout callback: announce the reason via GOAWAY and tear the
 * connection down. */
static void on_idle_timeout(h2o_timeout_entry_t *entry)
{
    h2o_http2_conn_t *conn = H2O_STRUCT_FROM_MEMBER(h2o_http2_conn_t, _timeout_entry, entry);

    enqueue_goaway(conn, H2O_HTTP2_ERROR_NONE, h2o_iovec_init(H2O_STRLIT("idle timeout")));
    close_connection(conn);
}
/* (Re)arms the idle timer. The connection counts as idle only while no stream
 * is blocked waiting on the server and no write is in flight; otherwise the
 * timer is left unlinked. */
static void update_idle_timeout(h2o_http2_conn_t *conn)
{
    h2o_timeout_unlink(&conn->_timeout_entry);

    if (conn->num_streams.blocked_by_server == 0 && conn->_write.buf_in_flight == NULL) {
        conn->_timeout_entry.cb = on_idle_timeout;
        h2o_timeout_link(conn->super.ctx->loop, &conn->super.ctx->http2.idle_timeout, &conn->_timeout_entry);
    }
}
/* Returns non-zero while another request may be dispatched, i.e. the number
 * of half-closed pull and push streams is still below the configured
 * per-connection concurrency limit. */
static int can_run_requests(h2o_http2_conn_t *conn)
{
    size_t inflight = conn->num_streams.pull.half_closed + conn->num_streams.push.half_closed;

    return inflight < conn->super.ctx->globalconf->http2.max_concurrent_requests_per_connection;
}
/* Dispatches queued requests while the concurrency limit permits. Streams
 * with a streaming request body (proceed_req != NULL) are additionally
 * serialized: only one such stream may be in progress at a time. The outer
 * loop restarts the scan because h2o_process_request may synchronously free
 * capacity or enqueue more work. */
static void run_pending_requests(h2o_http2_conn_t *conn)
{
    h2o_linklist_t *link, *lnext;
    int ran_one_request;

    do {
        ran_one_request = 0;

        for (link = conn->_pending_reqs.next; link != &conn->_pending_reqs && can_run_requests(conn); link = lnext) {
            /* fetch and detach a pending stream */
            h2o_http2_stream_t *stream = H2O_STRUCT_FROM_MEMBER(h2o_http2_stream_t, _refs.link, link);

            lnext = link->next;

            if (stream->req.proceed_req != NULL) {
                /* streaming-body request: skip while another one is already in progress */
                if (conn->num_streams._request_body_in_progress) {
                    continue;
                }
                conn->num_streams._request_body_in_progress++;
                stream->_conn_stream_in_progress = 1;
            } else {
                if (stream->state < H2O_HTTP2_STREAM_STATE_SEND_HEADERS) {
                    h2o_http2_stream_set_state(conn, stream, H2O_HTTP2_STREAM_STATE_REQ_PENDING);
                    h2o_http2_stream_set_state(conn, stream, H2O_HTTP2_STREAM_STATE_SEND_HEADERS);
                }
            }

            h2o_linklist_unlink(&stream->_refs.link);
            ran_one_request = 1;

            /* handle it */
            if (!h2o_http2_stream_is_push(stream->stream_id) && conn->pull_stream_ids.max_processed < stream->stream_id)
                conn->pull_stream_ids.max_processed = stream->stream_id;
            h2o_process_request(&stream->req);
        }

    } while (ran_one_request && !h2o_linklist_is_empty(&conn->_pending_reqs));
}
/* Appends the stream to the pending-request queue, then drains as much of the
 * queue as the concurrency limit allows and refreshes the idle timer. */
static void execute_or_enqueue_request_core(h2o_http2_conn_t *conn, h2o_http2_stream_t *stream)
{
    /* TODO schedule the pending reqs using the scheduler */
    h2o_linklist_insert(&conn->_pending_reqs, &stream->_refs.link);

    run_pending_requests(conn);
    update_idle_timeout(conn);
}
/* Marks a fully-received request as pending (and its stream as blocked on the
 * server), then hands it to the dispatch queue. */
static void execute_or_enqueue_request(h2o_http2_conn_t *conn, h2o_http2_stream_t *stream)
{
    assert(stream->state == H2O_HTTP2_STREAM_STATE_RECV_HEADERS || stream->state == H2O_HTTP2_STREAM_STATE_REQ_PENDING);

    h2o_http2_stream_set_state(conn, stream, H2O_HTTP2_STREAM_STATE_REQ_PENDING);
    if (stream->blocked_by_server == 0)
        h2o_http2_stream_set_blocked_by_server(conn, stream, 1);
    execute_or_enqueue_request_core(conn, stream);
}
/* Registers the stream in the connection's stream-id hash table. */
void h2o_http2_conn_register_stream(h2o_http2_conn_t *conn, h2o_http2_stream_t *stream)
{
    int absent;
    khiter_t slot;

    slot = kh_put(h2o_http2_stream_t, conn->streams, stream->stream_id, &absent);
    assert(slot != kh_end(conn->streams));
    kh_val(conn->streams, slot) = stream;
}
/* Removes the stream from the connection: detaches it from the stream hash
 * table, the scheduler, any pending/streaming bookkeeping and whatever list
 * it is linked on (which depends on its state), finally forcing it into the
 * END_STREAM state and — unless the connection is closing — giving queued
 * requests a chance to run. */
void h2o_http2_conn_unregister_stream(h2o_http2_conn_t *conn, h2o_http2_stream_t *stream)
{
    khiter_t iter = kh_get(h2o_http2_stream_t, conn->streams, stream->stream_id);
    assert(iter != kh_end(conn->streams));
    kh_del(h2o_http2_stream_t, conn->streams, iter);

    assert(h2o_http2_scheduler_is_open(&stream->_refs.scheduler));
    h2o_http2_scheduler_close(&stream->_refs.scheduler);

    if (stream->_conn_stream_in_progress) {
        /* release the one-at-a-time streaming-request-body slot held by this stream */
        h2o_http2_conn_t *conn = (h2o_http2_conn_t *)stream->req.conn;
        stream->_conn_stream_in_progress = 0;
        conn->num_streams._request_body_in_progress--;
    }

    /* unlink from whichever list the stream may be on; which lists are
     * possible depends on the stream state */
    switch (stream->state) {
    case H2O_HTTP2_STREAM_STATE_RECV_BODY:
        if (h2o_linklist_is_linked(&stream->_refs.link))
            h2o_linklist_unlink(&stream->_refs.link);
    /* fallthru */
    case H2O_HTTP2_STREAM_STATE_IDLE:
    case H2O_HTTP2_STREAM_STATE_RECV_HEADERS:
        assert(!h2o_linklist_is_linked(&stream->_refs.link));
        break;
    case H2O_HTTP2_STREAM_STATE_REQ_PENDING:
        assert(h2o_linklist_is_linked(&stream->_refs.link));
        h2o_linklist_unlink(&stream->_refs.link);
        break;
    case H2O_HTTP2_STREAM_STATE_SEND_HEADERS:
    case H2O_HTTP2_STREAM_STATE_SEND_BODY:
    case H2O_HTTP2_STREAM_STATE_SEND_BODY_IS_FINAL:
    case H2O_HTTP2_STREAM_STATE_END_STREAM:
        if (h2o_linklist_is_linked(&stream->_refs.link))
            h2o_linklist_unlink(&stream->_refs.link);
        break;
    }
    if (stream->state != H2O_HTTP2_STREAM_STATE_END_STREAM)
        h2o_http2_stream_set_state(conn, stream, H2O_HTTP2_STREAM_STATE_END_STREAM);

    if (conn->state < H2O_HTTP2_CONN_STATE_IS_CLOSING) {
        run_pending_requests(conn);
        update_idle_timeout(conn);
    }
}
/* Destroys the connection immediately: closes every remaining stream,
 * releases all owned resources (hash table, HPACK tables, buffers, scheduler,
 * socket) and frees the connection object. Must only be called when no write
 * is in flight (asserted below). */
static void close_connection_now(h2o_http2_conn_t *conn)
{
    h2o_http2_stream_t *stream;

    assert(!h2o_timeout_is_linked(&conn->_write.timeout_entry));

    kh_foreach_value(conn->streams, stream, { h2o_http2_stream_close(conn, stream); });

    /* closing every stream must have brought all the per-state counters to zero */
    assert(conn->num_streams.pull.open == 0);
    assert(conn->num_streams.pull.half_closed == 0);
    assert(conn->num_streams.pull.send_body == 0);
    assert(conn->num_streams.push.half_closed == 0);
    assert(conn->num_streams.push.send_body == 0);
    assert(conn->num_streams.priority.open == 0);
    kh_destroy(h2o_http2_stream_t, conn->streams);
    assert(conn->_http1_req_input == NULL);
    h2o_hpack_dispose_header_table(&conn->_input_header_table);
    h2o_hpack_dispose_header_table(&conn->_output_header_table);
    assert(h2o_linklist_is_empty(&conn->_pending_reqs));
    h2o_timeout_unlink(&conn->_timeout_entry);
    h2o_buffer_dispose(&conn->_write.buf);
    if (conn->_write.buf_in_flight != NULL)
        h2o_buffer_dispose(&conn->_write.buf_in_flight);
    h2o_http2_scheduler_dispose(&conn->scheduler);
    assert(h2o_linklist_is_empty(&conn->_write.streams_to_proceed));
    assert(!h2o_timeout_is_linked(&conn->_write.timeout_entry));
    if (conn->_headers_unparsed != NULL)
        h2o_buffer_dispose(&conn->_headers_unparsed);
    if (conn->push_memo != NULL)
        h2o_cache_destroy(conn->push_memo);
    if (conn->casper != NULL)
        h2o_http2_casper_destroy(conn->casper);
    h2o_linklist_unlink(&conn->_conns);

    if (conn->sock != NULL)
        h2o_socket_close(conn->sock);
    free(conn);
}
/* Transitions the connection into the closing state.
 *
 * Returns -1 when the connection has been destroyed on the spot (no write was
 * pending), 0 when the teardown has been deferred to on_write_complete. */
int close_connection(h2o_http2_conn_t *conn)
{
    conn->state = H2O_HTTP2_CONN_STATE_IS_CLOSING;

    if (conn->_write.buf_in_flight == NULL && !h2o_timeout_is_linked(&conn->_write.timeout_entry)) {
        close_connection_now(conn);
        return -1;
    }
    /* a write is pending; let on_write_complete perform the actual close */
    return 0;
}
/* Enqueues an RST_STREAM frame carrying `-errnum` for the given stream and
 * bumps the protocol-error counter. Must not be used for stream 0 (the
 * connection) nor on a closing connection (asserted). */
static void stream_send_error(h2o_http2_conn_t *conn, uint32_t stream_id, int errnum)
{
    assert(stream_id != 0);
    assert(conn->state < H2O_HTTP2_CONN_STATE_IS_CLOSING);

    conn->super.ctx->http2.events.protocol_level_errors[-errnum]++;

    h2o_http2_encode_rst_stream_frame(&conn->_write.buf, stream_id, -errnum);
    h2o_http2_conn_request_write(conn);
}
/* Schedules the gathered write (via the zero-length timeout) unless a write
 * callback is already installed on the socket or the timer is already armed. */
static void request_gathered_write(h2o_http2_conn_t *conn)
{
    assert(conn->state < H2O_HTTP2_CONN_STATE_IS_CLOSING);
    if (conn->sock->_cb.write != NULL)
        return;
    if (h2o_timeout_is_linked(&conn->_write.timeout_entry))
        return;
    h2o_timeout_link(conn->super.ctx->loop, &conn->super.ctx->zero_timeout, &conn->_write.timeout_entry);
}
/* Applies `delta` to the stream's send window. Returns -1 on window overflow
 * (caller must reset the stream). If the window transitions from exhausted to
 * positive and the stream has something to send, the stream is reactivated in
 * the scheduler. */
static int update_stream_output_window(h2o_http2_stream_t *stream, ssize_t delta)
{
    ssize_t cur = h2o_http2_window_get_window(&stream->output_window);
    if (h2o_http2_window_update(&stream->output_window, delta) != 0)
        return -1;
    if (cur <= 0 && h2o_http2_window_get_window(&stream->output_window) > 0 &&
        (h2o_http2_stream_has_pending_data(stream) || stream->state >= H2O_HTTP2_STREAM_STATE_SEND_BODY_IS_FINAL)) {
        assert(!h2o_linklist_is_linked(&stream->_refs.link));
        h2o_http2_scheduler_activate(&stream->_refs.scheduler);
    }
    return 0;
}
/* Feeds one chunk of request-body data into the stream. Enforces the
 * max-request-entity-size limit and, when content-length was declared,
 * verifies the received total against it (exact match required at
 * end-of-stream); violations reset the stream. Valid data is forwarded to the
 * stream's write_req callback. */
static void handle_request_body_chunk(h2o_http2_conn_t *conn, h2o_http2_stream_t *stream, h2o_iovec_t payload, int is_end_stream)
{
    stream->_req_body.bytes_received += payload.len;

    /* check size */
    if (stream->_req_body.bytes_received > conn->super.ctx->globalconf->max_request_entity_size) {
        stream_send_error(conn, stream->stream_id, H2O_HTTP2_ERROR_REFUSED_STREAM);
        h2o_http2_stream_reset(conn, stream);
        return;
    }
    if (stream->req.content_length != SIZE_MAX) {
        size_t received = stream->_req_body.bytes_received, cl = stream->req.content_length;
        /* mid-stream only an excess is detectable; at end-of-stream the total must match exactly */
        if (is_end_stream ? (received != cl) : (received > cl)) {
            stream_send_error(conn, stream->stream_id, H2O_HTTP2_ERROR_PROTOCOL);
            h2o_http2_stream_reset(conn, stream);
            return;
        }
    }

    /* update timer */
    if (!stream->blocked_by_server)
        h2o_http2_stream_set_blocked_by_server(conn, stream, 1);

    /* handle input */
    if (is_end_stream) {
        h2o_http2_stream_set_state(conn, stream, H2O_HTTP2_STREAM_STATE_REQ_PENDING);
        if (stream->req.proceed_req != NULL)
            h2o_http2_stream_set_state(conn, stream, H2O_HTTP2_STREAM_STATE_SEND_HEADERS);
    }
    if (stream->req.write_req.cb(stream->req.write_req.ctx, payload, is_end_stream) != 0) {
        stream_send_error(conn, stream->stream_id, H2O_HTTP2_ERROR_STREAM_CLOSED);
        h2o_http2_stream_reset(conn, stream);
    }
}
/* Parses a complete (possibly reassembled) header block of a new request.
 * Connection-level HPACK errors are propagated to the caller; stream-level
 * problems (missing pseudo-headers, concurrency limit, invalid header chars)
 * are answered with RST_STREAM or a 400 response. On success the request is
 * dispatched, or the stream switches to body reception. Returns 0 or an
 * H2O_HTTP2_ERROR_* connection error. */
static int handle_incoming_request(h2o_http2_conn_t *conn, h2o_http2_stream_t *stream, const uint8_t *src, size_t len,
                                   const char **err_desc)
{
    int ret, header_exists_map;

    assert(stream->state == H2O_HTTP2_STREAM_STATE_RECV_HEADERS);

    header_exists_map = 0;
    if ((ret = h2o_hpack_parse_headers(&stream->req, &conn->_input_header_table, src, len, &header_exists_map,
                                       &stream->req.content_length, &stream->cache_digests, err_desc)) != 0) {
        /* all errors except invalid-header-char are connection errors */
        if (ret != H2O_HTTP2_ERROR_INVALID_HEADER_CHAR)
            return ret;
    }

    /* handle stream-level errors */
#define EXPECTED_MAP \
    (H2O_HPACK_PARSE_HEADERS_METHOD_EXISTS | H2O_HPACK_PARSE_HEADERS_PATH_EXISTS | H2O_HPACK_PARSE_HEADERS_SCHEME_EXISTS)
    if ((header_exists_map & EXPECTED_MAP) != EXPECTED_MAP) {
        ret = H2O_HTTP2_ERROR_PROTOCOL;
        goto SendRSTStream;
    }
#undef EXPECTED_MAP
    if (conn->num_streams.pull.open > H2O_HTTP2_SETTINGS_HOST.max_concurrent_streams) {
        ret = H2O_HTTP2_ERROR_REFUSED_STREAM;
        goto SendRSTStream;
    }

    /* handle request to send response */
    if (ret != 0) {
        assert(ret == H2O_HTTP2_ERROR_INVALID_HEADER_CHAR);
        /* fast forward the stream's state so that we can start sending the response */
        h2o_http2_stream_set_state(conn, stream, H2O_HTTP2_STREAM_STATE_REQ_PENDING);
        h2o_http2_stream_set_state(conn, stream, H2O_HTTP2_STREAM_STATE_SEND_HEADERS);
        h2o_send_error_400(&stream->req, "Invalid Headers", *err_desc, 0);
        return 0;
    }

    if (stream->_req_body.body == NULL) {
        /* no request body expected; dispatch immediately */
        execute_or_enqueue_request(conn, stream);
    } else {
        h2o_http2_stream_set_state(conn, stream, H2O_HTTP2_STREAM_STATE_RECV_BODY);
    }
    return 0;

SendRSTStream:
    stream_send_error(conn, stream->stream_id, ret);
    h2o_http2_stream_reset(conn, stream);
    return 0;
}
/* Parses a trailer header block (content-length is irrelevant here, hence the
 * dummy) and delivers the implicit zero-length end-of-stream body chunk.
 * Returns 0 or an HPACK connection error. */
static int handle_trailing_headers(h2o_http2_conn_t *conn, h2o_http2_stream_t *stream, const uint8_t *src, size_t len,
                                   const char **err_desc)
{
    size_t dummy_content_length;
    int ret;

    if ((ret = h2o_hpack_parse_headers(&stream->req, &conn->_input_header_table, src, len, NULL, &dummy_content_length, NULL,
                                       err_desc)) != 0)
        return ret;
    handle_request_body_chunk(conn, stream, h2o_iovec_init(NULL, 0), 1);
    return 0;
}
/* Input handler installed while a header block is split across CONTINUATION
 * frames: accumulates the fragments in conn->_headers_unparsed (bounded by
 * H2O_MAX_REQLEN) and, once END_HEADERS arrives, runs the full block through
 * the regular (or trailing) header parser and restores the default handler.
 * Returns bytes consumed or a negative H2O_HTTP2_ERROR_* value. */
static ssize_t expect_continuation_of_headers(h2o_http2_conn_t *conn, const uint8_t *src, size_t len, const char **err_desc)
{
    h2o_http2_frame_t frame;
    ssize_t ret;
    h2o_http2_stream_t *stream;
    int hret;

    if ((ret = h2o_http2_decode_frame(&frame, src, len, &H2O_HTTP2_SETTINGS_HOST, err_desc)) < 0)
        return ret;
    if (frame.type != H2O_HTTP2_FRAME_TYPE_CONTINUATION) {
        *err_desc = "expected CONTINUATION frame";
        return H2O_HTTP2_ERROR_PROTOCOL;
    }

    if (conn->state >= H2O_HTTP2_CONN_STATE_HALF_CLOSED)
        return 0;

    if ((stream = h2o_http2_conn_get_stream(conn, frame.stream_id)) == NULL ||
        !(stream->state == H2O_HTTP2_STREAM_STATE_RECV_HEADERS || stream->state == H2O_HTTP2_STREAM_STATE_RECV_BODY)) {
        *err_desc = "unexpected stream id in CONTINUATION frame";
        return H2O_HTTP2_ERROR_PROTOCOL;
    }

    if (conn->_headers_unparsed->size + frame.length <= H2O_MAX_REQLEN) {
        h2o_buffer_reserve(&conn->_headers_unparsed, frame.length);
        memcpy(conn->_headers_unparsed->bytes + conn->_headers_unparsed->size, frame.payload, frame.length);
        conn->_headers_unparsed->size += frame.length;

        if ((frame.flags & H2O_HTTP2_FRAME_FLAG_END_HEADERS) != 0) {
            conn->_read_expect = expect_default;
            if (stream->state == H2O_HTTP2_STREAM_STATE_RECV_HEADERS) {
                hret = handle_incoming_request(conn, stream, (const uint8_t *)conn->_headers_unparsed->bytes,
                                               conn->_headers_unparsed->size, err_desc);
            } else {
                /* RECV_BODY state: the accumulated block is a trailer */
                hret = handle_trailing_headers(conn, stream, (const uint8_t *)conn->_headers_unparsed->bytes,
                                               conn->_headers_unparsed->size, err_desc);
            }
            if (hret != 0)
                ret = hret;

            h2o_buffer_dispose(&conn->_headers_unparsed);
            conn->_headers_unparsed = NULL;
        }
    } else {
        /* request is too large (TODO log) */
        stream_send_error(conn, stream->stream_id, H2O_HTTP2_ERROR_REFUSED_STREAM);
        h2o_http2_stream_reset(conn, stream);
    }

    return ret;
}
/* Consumes `consumed` bytes from a receive window (stream-level, or the
 * connection when stream_id is 0) and, once less than half of the initial
 * window remains, replenishes it back to the initial size by sending a
 * WINDOW_UPDATE frame. */
static void update_input_window(h2o_http2_conn_t *conn, uint32_t stream_id, h2o_http2_window_t *window, size_t consumed)
{
    h2o_http2_window_consume_window(window, consumed);
    if (h2o_http2_window_get_window(window) * 2 < H2O_HTTP2_SETTINGS_HOST.initial_window_size) {
        int32_t delta = (int32_t)(H2O_HTTP2_SETTINGS_HOST.initial_window_size - h2o_http2_window_get_window(window));
        h2o_http2_encode_window_update_frame(&conn->_write.buf, stream_id, delta);
        h2o_http2_conn_request_write(conn);
        h2o_http2_window_update(window, delta);
    }
}
/* Places the stream in the priority tree according to `priority`. A dependency
 * on a stream that is no longer (or not yet) in the tree degrades to the
 * default priority under the root, per RFC 7540 5.3.1/5.3.4. Opens a new
 * scheduler slot or rebinds the existing one depending on
 * `scheduler_is_open`. */
static void set_priority(h2o_http2_conn_t *conn, h2o_http2_stream_t *stream, const h2o_http2_priority_t *priority,
                         int scheduler_is_open)
{
    h2o_http2_scheduler_node_t *parent_sched;

    /* determine the parent */
    if (priority->dependency != 0) {
        h2o_http2_stream_t *parent_stream = h2o_http2_conn_get_stream(conn, priority->dependency);
        if (parent_stream != NULL) {
            parent_sched = &parent_stream->_refs.scheduler.node;
        } else {
            /* A dependency on a stream that is not currently in the tree - such as a stream in the "idle" state - results in that
             * stream being given a default priority. (RFC 7540 5.3.1)
             * It is possible for a stream to become closed while prioritization information that creates a dependency on that
             * stream is in transit. If a stream identified in a dependency has no associated priority information, then the
             * dependent stream is instead assigned a default priority. (RFC 7540 5.3.4)
             */
            parent_sched = &conn->scheduler;
            priority = &h2o_http2_default_priority;
        }
    } else {
        parent_sched = &conn->scheduler;
    }

    /* setup the scheduler */
    if (!scheduler_is_open) {
        h2o_http2_scheduler_open(&stream->_refs.scheduler, parent_sched, priority->weight, priority->exclusive);
    } else {
        h2o_http2_scheduler_rebind(&stream->_refs.scheduler, parent_sched, priority->weight, priority->exclusive);
    }
}
/* Callback invoked when the handler has consumed `written` bytes of a
 * streaming request body: clears the blocked-by-server flag (while still
 * receiving) and replenishes the stream's receive window accordingly. */
static void proceed_request(h2o_req_t *req, size_t written, int is_end_stream)
{
    h2o_http2_stream_t *stream = H2O_STRUCT_FROM_MEMBER(h2o_http2_stream_t, req, req);
    h2o_http2_conn_t *conn = (h2o_http2_conn_t *)stream->req.conn;

    if (stream->blocked_by_server && stream->state == H2O_HTTP2_STREAM_STATE_RECV_BODY) {
        h2o_http2_stream_set_blocked_by_server(conn, stream, 0);
        update_idle_timeout(conn);
    }

    update_input_window(conn, stream->stream_id, &stream->input_window, written);
}
/* write_req callback used when the request body is buffered in full: appends
 * the chunk, replenishes the window, and dispatches the request once the
 * final chunk has arrived. Returns -1 when the buffer cannot grow. */
static int write_req_non_streaming(void *_req, h2o_iovec_t payload, int is_end_stream)
{
    h2o_http2_stream_t *stream = H2O_STRUCT_FROM_MEMBER(h2o_http2_stream_t, req, _req);
    if (h2o_buffer_append(&stream->_req_body.body, payload.base, payload.len) == 0)
        return -1;
    proceed_request(&stream->req, payload.len, is_end_stream);

    if (is_end_stream) {
        stream->req.entity = h2o_iovec_init(stream->_req_body.body->bytes, stream->_req_body.body->size);
        execute_or_enqueue_request((h2o_http2_conn_t *)stream->req.conn, stream);
    }
    return 0;
}
/* write_req callback used in streaming mode before the handler takes over:
 * appends the chunk, keeps req.entity pointing at the buffered bytes, and
 * records end-of-stream by clearing proceed_req. Returns -1 when the buffer
 * cannot grow. */
static int write_req_streaming_pre_dispatch(void *_req, h2o_iovec_t payload, int is_end_stream)
{
    h2o_http2_stream_t *stream = H2O_STRUCT_FROM_MEMBER(h2o_http2_stream_t, req, _req);
    if (h2o_buffer_append(&stream->_req_body.body, payload.base, payload.len) == 0)
        return -1;
    stream->req.entity = h2o_iovec_init(stream->_req_body.body->bytes, stream->_req_body.body->size);

    /* mark that we have seen eos */
    if (is_end_stream)
        stream->req.proceed_req = NULL;

    return 0;
}
/* write_req callback for the very first body chunk: decides between streaming
 * and buffered mode. Streaming is chosen when the body is not yet complete
 * and the first handler declares request-streaming support — in that case the
 * request is dispatched right away; otherwise the non-streaming callback is
 * installed and handles this (and every later) chunk. */
static int write_req_first(void *_req, h2o_iovec_t payload, int is_end_stream)
{
    h2o_http2_stream_t *stream = H2O_STRUCT_FROM_MEMBER(h2o_http2_stream_t, req, _req);
    h2o_handler_t *first_handler;

    /* if possible, switch to either streaming request body mode */
    if (!is_end_stream && (first_handler = h2o_get_first_handler(&stream->req)) != NULL &&
        first_handler->supports_request_streaming) {
        if (h2o_buffer_append(&stream->_req_body.body, payload.base, payload.len) == 0)
            return -1;
        stream->req.entity = h2o_iovec_init(stream->_req_body.body->bytes, stream->_req_body.body->size);
        stream->req.write_req.cb = write_req_streaming_pre_dispatch;
        stream->req.proceed_req = proceed_request;
        execute_or_enqueue_request_core((h2o_http2_conn_t *)stream->req.conn, stream);
        return 0;
    }

    stream->req.write_req.cb = write_req_non_streaming;
    return write_req_non_streaming(stream->req.write_req.ctx, payload, is_end_stream);
}
/* Handles a DATA frame: accounts the payload against the connection-level
 * receive window, then routes it to the stream's body handling. Data for a
 * closed-but-known stream is answered with RST_STREAM(STREAM_CLOSED); data
 * for an idle stream id is a connection error. Returns 0 or an
 * H2O_HTTP2_ERROR_* connection error. */
static int handle_data_frame(h2o_http2_conn_t *conn, h2o_http2_frame_t *frame, const char **err_desc)
{
    h2o_http2_data_payload_t payload;
    h2o_http2_stream_t *stream;
    int ret;

    if ((ret = h2o_http2_decode_data_payload(&payload, frame, err_desc)) != 0)
        return ret;
    if (conn->state >= H2O_HTTP2_CONN_STATE_HALF_CLOSED)
        return 0;

    /* the connection-level window is consumed regardless of the stream's fate */
    update_input_window(conn, 0, &conn->_input_window, payload.length);

    /* save the input in the request body buffer, or send error (and close the stream) */
    if ((stream = h2o_http2_conn_get_stream(conn, frame->stream_id)) == NULL) {
        if (frame->stream_id <= conn->pull_stream_ids.max_open) {
            stream_send_error(conn, frame->stream_id, H2O_HTTP2_ERROR_STREAM_CLOSED);
            return 0;
        } else {
            *err_desc = "invalid DATA frame";
            return H2O_HTTP2_ERROR_PROTOCOL;
        }
    }
    if (stream->state != H2O_HTTP2_STREAM_STATE_RECV_BODY) {
        stream_send_error(conn, frame->stream_id, H2O_HTTP2_ERROR_STREAM_CLOSED);
        h2o_http2_stream_reset(conn, stream);
        return 0;
    }

    handle_request_body_chunk(conn, stream, h2o_iovec_init(payload.data, payload.length),
                              (frame->flags & H2O_HTTP2_FRAME_FLAG_END_STREAM) != 0);

    return 0;
}
/* Handles a HEADERS frame. Distinguishes three cases: a trailer on a stream
 * that is receiving its body, a new request on a fresh (odd) stream id, or a
 * protocol violation. Applies any carried priority information, and defers to
 * CONTINUATION handling when END_HEADERS is not set. Returns 0 or an
 * H2O_HTTP2_ERROR_* connection error. */
static int handle_headers_frame(h2o_http2_conn_t *conn, h2o_http2_frame_t *frame, const char **err_desc)
{
    h2o_http2_headers_payload_t payload;
    h2o_http2_stream_t *stream;
    int ret;

    /* decode */
    if ((ret = h2o_http2_decode_headers_payload(&payload, frame, err_desc)) != 0)
        return ret;
    if ((frame->stream_id & 1) == 0) {
        /* client-initiated streams must have odd ids (RFC 7540 5.1.1) */
        *err_desc = "invalid stream id in HEADERS frame";
        return H2O_HTTP2_ERROR_PROTOCOL;
    }
    if (!(conn->pull_stream_ids.max_open < frame->stream_id)) {
        if ((stream = h2o_http2_conn_get_stream(conn, frame->stream_id)) != NULL &&
            stream->state == H2O_HTTP2_STREAM_STATE_RECV_BODY) {
            /* is a trailer */
            if ((frame->flags & H2O_HTTP2_FRAME_FLAG_END_STREAM) == 0) {
                *err_desc = "trailing HEADERS frame MUST have END_STREAM flag set";
                return H2O_HTTP2_ERROR_PROTOCOL;
            }
            stream->req.entity = h2o_iovec_init(stream->_req_body.body->bytes, stream->_req_body.body->size);
            if ((frame->flags & H2O_HTTP2_FRAME_FLAG_END_HEADERS) == 0)
                goto PREPARE_FOR_CONTINUATION;
            return handle_trailing_headers(conn, stream, payload.headers, payload.headers_len, err_desc);
        } else if (!stream || stream->state != H2O_HTTP2_STREAM_STATE_IDLE) {
            /* it's legit that stream exists and is IDLE if a PRIORITY frame was received earlier */
            *err_desc = "invalid stream id in HEADERS frame";
            return H2O_HTTP2_ERROR_STREAM_CLOSED;
        }
    }
    if (frame->stream_id == payload.priority.dependency) {
        *err_desc = "stream cannot depend on itself";
        return H2O_HTTP2_ERROR_PROTOCOL;
    }

    if (conn->state >= H2O_HTTP2_CONN_STATE_HALF_CLOSED)
        return 0;

    /* open or determine the stream and prepare */
    if ((stream = h2o_http2_conn_get_stream(conn, frame->stream_id)) != NULL) {
        if ((frame->flags & H2O_HTTP2_FRAME_FLAG_PRIORITY) != 0) {
            set_priority(conn, stream, &payload.priority, 1);
            stream->received_priority = payload.priority;
        }
    } else {
        stream = h2o_http2_stream_open(conn, frame->stream_id, NULL, &payload.priority);
        set_priority(conn, stream, &payload.priority, 0);
    }
    h2o_http2_stream_prepare_for_request(conn, stream);
    stream->req.write_req.cb = write_req_first;
    stream->req.write_req.ctx = &stream->req;

    /* setup container for request body if it is expected to arrive */
    if ((frame->flags & H2O_HTTP2_FRAME_FLAG_END_STREAM) == 0)
        h2o_buffer_init(&stream->_req_body.body, &h2o_socket_buffer_prototype);

    if ((frame->flags & H2O_HTTP2_FRAME_FLAG_END_HEADERS) != 0) {
        /* request is complete, handle it */
        return handle_incoming_request(conn, stream, payload.headers, payload.headers_len, err_desc);
    }

PREPARE_FOR_CONTINUATION:
    /* request is not complete, store in buffer */
    conn->_read_expect = expect_continuation_of_headers;
    h2o_buffer_init(&conn->_headers_unparsed, &h2o_socket_buffer_prototype);
    h2o_buffer_reserve(&conn->_headers_unparsed, payload.headers_len);
    memcpy(conn->_headers_unparsed->bytes, payload.headers, payload.headers_len);
    conn->_headers_unparsed->size = payload.headers_len;

    return 0;
}
/* Handles a PRIORITY frame. Re-prioritizes an existing stream (except pushed
 * streams pinned at weight 257), ignores frames for closed streams, and may
 * open a placeholder "priority" stream for an idle pull stream id — bounded
 * by max_streams_for_priority to avoid resource exhaustion (RFC 7540 10.5).
 * Returns 0 or an H2O_HTTP2_ERROR_* connection error. */
static int handle_priority_frame(h2o_http2_conn_t *conn, h2o_http2_frame_t *frame, const char **err_desc)
{
    h2o_http2_priority_t payload;
    h2o_http2_stream_t *stream;
    int ret;

    if ((ret = h2o_http2_decode_priority_payload(&payload, frame, err_desc)) != 0)
        return ret;
    if (frame->stream_id == payload.dependency) {
        *err_desc = "stream cannot depend on itself";
        return H2O_HTTP2_ERROR_PROTOCOL;
    }

    if ((stream = h2o_http2_conn_get_stream(conn, frame->stream_id)) != NULL) {
        stream->received_priority = payload;
        /* ignore priority changes to pushed streams with weight=257, since that is where we are trying to be smarter than the web
         * browsers
         */
        if (h2o_http2_scheduler_get_weight(&stream->_refs.scheduler) != 257)
            set_priority(conn, stream, &payload, 1);
    } else {
        if (h2o_http2_stream_is_push(frame->stream_id)) {
            /* Ignore PRIORITY frames for closed or idle pushed streams */
            return 0;
        } else {
            /* Ignore PRIORITY frames for closed pull streams */
            if (frame->stream_id <= conn->pull_stream_ids.max_open)
                return 0;
        }
        if (conn->num_streams.priority.open >= conn->super.ctx->globalconf->http2.max_streams_for_priority) {
            *err_desc = "too many streams in idle/closed state";
            /* RFC 7540 10.5: An endpoint MAY treat activity that is suspicious as a connection error (Section 5.4.1) of type
             * ENHANCE_YOUR_CALM.
             */
            return H2O_HTTP2_ERROR_ENHANCE_YOUR_CALM;
        }
        stream = h2o_http2_stream_open(conn, frame->stream_id, NULL, &payload);
        set_priority(conn, stream, &payload, 0);
    }

    return 0;
}
/* Requests a write iff the connection-level send window permits any output. */
static void resume_send(h2o_http2_conn_t *conn)
{
    if (h2o_http2_conn_get_buffer_window(conn) <= 0)
        return;
#if 0 /* TODO reenable this check for performance? */
    if (conn->scheduler.list.size == 0)
        return;
#endif
    request_gathered_write(conn);
}
/* Handles a SETTINGS frame: validates ACKs, applies peer settings, schedules
 * a SETTINGS ACK, and — when SETTINGS_INITIAL_WINDOW_SIZE changed — adjusts
 * every stream's send window by the delta (the connection window is exempt,
 * per RFC 7540 6.9.2). Returns 0 or an H2O_HTTP2_ERROR_* connection error. */
static int handle_settings_frame(h2o_http2_conn_t *conn, h2o_http2_frame_t *frame, const char **err_desc)
{
    if (frame->stream_id != 0) {
        *err_desc = "invalid stream id in SETTINGS frame";
        return H2O_HTTP2_ERROR_PROTOCOL;
    }

    if ((frame->flags & H2O_HTTP2_FRAME_FLAG_ACK) != 0) {
        if (frame->length != 0) {
            *err_desc = "invalid SETTINGS frame (+ACK)";
            return H2O_HTTP2_ERROR_FRAME_SIZE;
        }
    } else {
        uint32_t prev_initial_window_size = conn->peer_settings.initial_window_size;
        /* FIXME handle SETTINGS_HEADER_TABLE_SIZE */
        int ret = h2o_http2_update_peer_settings(&conn->peer_settings, frame->payload, frame->length, err_desc);
        if (ret != 0)
            return ret;
        { /* schedule ack */
            h2o_iovec_t header_buf = h2o_buffer_reserve(&conn->_write.buf, H2O_HTTP2_FRAME_HEADER_SIZE);
            h2o_http2_encode_frame_header((void *)header_buf.base, 0, H2O_HTTP2_FRAME_TYPE_SETTINGS, H2O_HTTP2_FRAME_FLAG_ACK, 0);
            conn->_write.buf->size += H2O_HTTP2_FRAME_HEADER_SIZE;
            h2o_http2_conn_request_write(conn);
        }
        /* apply the change to window size (to all the streams but not the connection, see 6.9.2 of draft-15) */
        if (prev_initial_window_size != conn->peer_settings.initial_window_size) {
            ssize_t delta = conn->peer_settings.initial_window_size - prev_initial_window_size;
            h2o_http2_stream_t *stream;
            kh_foreach_value(conn->streams, stream, { update_stream_output_window(stream, delta); });
            resume_send(conn);
        }
    }

    return 0;
}
/* Handles a WINDOW_UPDATE frame: grows the connection-level send window
 * (stream 0) or a known stream's window, treating overflow as
 * FLOW_CONTROL_ERROR (connection- or stream-level, respectively). Updates for
 * idle stream ids are a protocol error. Ends by resuming output if the
 * connection window now permits it. Returns 0 or an H2O_HTTP2_ERROR_*
 * connection error. */
static int handle_window_update_frame(h2o_http2_conn_t *conn, h2o_http2_frame_t *frame, const char **err_desc)
{
    h2o_http2_window_update_payload_t payload;
    int ret, err_is_stream_level;

    if ((ret = h2o_http2_decode_window_update_payload(&payload, frame, err_desc, &err_is_stream_level)) != 0) {
        if (err_is_stream_level) {
            h2o_http2_stream_t *stream = h2o_http2_conn_get_stream(conn, frame->stream_id);
            if (stream != NULL)
                h2o_http2_stream_reset(conn, stream);
            stream_send_error(conn, frame->stream_id, ret);
            return 0;
        } else {
            return ret;
        }
    }

    if (frame->stream_id == 0) {
        if (h2o_http2_window_update(&conn->_write.window, payload.window_size_increment) != 0) {
            *err_desc = "flow control window overflow";
            return H2O_HTTP2_ERROR_FLOW_CONTROL;
        }
    } else if (!is_idle_stream_id(conn, frame->stream_id)) {
        h2o_http2_stream_t *stream = h2o_http2_conn_get_stream(conn, frame->stream_id);
        if (stream != NULL) {
            if (update_stream_output_window(stream, payload.window_size_increment) != 0) {
                h2o_http2_stream_reset(conn, stream);
                stream_send_error(conn, frame->stream_id, H2O_HTTP2_ERROR_FLOW_CONTROL);
                return 0;
            }
        }
    } else {
        *err_desc = "invalid stream id in WINDOW_UPDATE frame";
        return H2O_HTTP2_ERROR_PROTOCOL;
    }

    resume_send(conn);

    return 0;
}
/* Handles a GOAWAY frame sent by the client.
 *
 * The original comment claimed there was "nothing to do, since we do not open
 * new streams by ourselves", but this server DOES initiate push streams (see
 * push_path / push_stream_ids). RFC 7540 Section 6.8 requires an endpoint
 * that received GOAWAY to stop creating new streams, so we mark every
 * possible push stream id as already opened, which makes the push machinery
 * refrain from opening further push streams on this connection.
 *
 * Returns 0 on success, or a negative H2O_HTTP2_ERROR_* value when the
 * payload cannot be decoded (propagated to the caller as a connection
 * error). */
static int handle_goaway_frame(h2o_http2_conn_t *conn, h2o_http2_frame_t *frame, const char **err_desc)
{
    h2o_http2_goaway_payload_t payload;
    int ret;

    if ((ret = h2o_http2_decode_goaway_payload(&payload, frame, err_desc)) != 0)
        return ret;

    /* stop opening new push streams hereafter */
    conn->push_stream_ids.max_open = INT32_MAX;

    return 0;
}
/* Handles a PING frame: echoes the 8-byte payload back as a PING ACK. A PING
 * that itself carries the ACK flag is ignored (answering it would bounce
 * pings back and forth forever). Returns 0 or a decode error. */
static int handle_ping_frame(h2o_http2_conn_t *conn, h2o_http2_frame_t *frame, const char **err_desc)
{
    h2o_http2_ping_payload_t payload;
    int decode_ret = h2o_http2_decode_ping_payload(&payload, frame, err_desc);

    if (decode_ret != 0)
        return decode_ret;

    if ((frame->flags & H2O_HTTP2_FRAME_FLAG_ACK) == 0) {
        h2o_http2_encode_ping_frame(&conn->_write.buf, 1, payload.data);
        h2o_http2_conn_request_write(conn);
    }

    return 0;
}
/* Handles an RST_STREAM frame: resets the referenced stream if it is still
 * open; RST_STREAM for an idle stream id is a connection-level protocol
 * error (RFC 7540 6.4). Returns 0 or an H2O_HTTP2_ERROR_* connection
 * error. */
static int handle_rst_stream_frame(h2o_http2_conn_t *conn, h2o_http2_frame_t *frame, const char **err_desc)
{
    h2o_http2_rst_stream_payload_t payload;
    h2o_http2_stream_t *stream;
    int ret;

    if ((ret = h2o_http2_decode_rst_stream_payload(&payload, frame, err_desc)) != 0)
        return ret;
    if (is_idle_stream_id(conn, frame->stream_id)) {
        *err_desc = "unexpected stream id in RST_STREAM frame";
        return H2O_HTTP2_ERROR_PROTOCOL;
    }

    stream = h2o_http2_conn_get_stream(conn, frame->stream_id);
    if (stream != NULL) {
        /* reset the stream */
        h2o_http2_stream_reset(conn, stream);
    }
    /* TODO log */

    return 0;
}
/* Clients must never send PUSH_PROMISE (RFC 7540 8.2); treat it as a
 * connection-level protocol error. */
static int handle_push_promise_frame(h2o_http2_conn_t *conn, h2o_http2_frame_t *frame, const char **err_desc)
{
    *err_desc = "received PUSH_PROMISE frame";
    return H2O_HTTP2_ERROR_PROTOCOL;
}
/* Handles a CONTINUATION frame that arrives outside of a header block.
 *
 * Reaching this handler means the frame was not part of an in-progress
 * HEADERS/PUSH_PROMISE sequence (those are presumably consumed by a separate
 * header-reading state not visible here — TODO confirm), so it is treated as a
 * connection-level protocol error. */
static int handle_invalid_continuation_frame(h2o_http2_conn_t *conn, h2o_http2_frame_t *frame, const char **err_desc)
{
    *err_desc = "received invalid CONTINUATION frame";
    return H2O_HTTP2_ERROR_PROTOCOL;
}
/* Frame-processing read callback (installed once the connection preface has
 * been received; see expect_preface).  Decodes one frame from `src` and
 * dispatches it to the handler matching its type; frames of unknown types are
 * skipped.
 *
 * Returns the number of bytes consumed, or a negative H2O_HTTP2_ERROR_* value
 * (h2o_http2_decode_frame may also report H2O_HTTP2_ERROR_INCOMPLETE).  When
 * the dispatched handler fails, its error code replaces the byte count. */
ssize_t expect_default(h2o_http2_conn_t *conn, const uint8_t *src, size_t len, const char **err_desc)
{
    h2o_http2_frame_t frame;
    ssize_t ret;
    /* one handler per frame type, indexed by the HTTP/2 frame-type codes */
    static int (*FRAME_HANDLERS[])(h2o_http2_conn_t * conn, h2o_http2_frame_t * frame, const char **err_desc) = {
        handle_data_frame, /* DATA */
        handle_headers_frame, /* HEADERS */
        handle_priority_frame, /* PRIORITY */
        handle_rst_stream_frame, /* RST_STREAM */
        handle_settings_frame, /* SETTINGS */
        handle_push_promise_frame, /* PUSH_PROMISE */
        handle_ping_frame, /* PING */
        handle_goaway_frame, /* GOAWAY */
        handle_window_update_frame, /* WINDOW_UPDATE */
        handle_invalid_continuation_frame /* CONTINUATION */
    };
    if ((ret = h2o_http2_decode_frame(&frame, src, len, &H2O_HTTP2_SETTINGS_HOST, err_desc)) < 0)
        return ret;
    if (frame.type < sizeof(FRAME_HANDLERS) / sizeof(FRAME_HANDLERS[0])) {
        int hret = FRAME_HANDLERS[frame.type](conn, &frame, err_desc);
        if (hret != 0)
            ret = hret; /* propagate the handler's error instead of the consumed length */
    } else {
        /* unknown frame type: ignore it (bytes are still consumed) */
        fprintf(stderr, "skipping frame (type:%d)\n", frame.type);
    }
    return ret;
}
/* Reads and validates the client connection preface.  On success, queues the
 * server preface (SETTINGS plus a connection-level WINDOW_UPDATE) and switches
 * the parser over to frame-processing mode (expect_default). */
static ssize_t expect_preface(h2o_http2_conn_t *conn, const uint8_t *src, size_t len, const char **err_desc)
{
    if (len < CONNECTION_PREFACE.len)
        return H2O_HTTP2_ERROR_INCOMPLETE;
    if (memcmp(src, CONNECTION_PREFACE.base, CONNECTION_PREFACE.len) != 0)
        return H2O_HTTP2_ERROR_PROTOCOL_CLOSE_IMMEDIATELY;

    /* send SETTINGS and connection-level WINDOW_UPDATE */
    h2o_iovec_t reserved = h2o_buffer_reserve(&conn->_write.buf, SERVER_PREFACE.len);
    memcpy(reserved.base, SERVER_PREFACE.base, SERVER_PREFACE.len);
    conn->_write.buf->size += SERVER_PREFACE.len;
    h2o_http2_conn_request_write(conn);

    conn->_read_expect = expect_default;
    return CONNECTION_PREFACE.len;
}
/* Drains the socket's input buffer, letting the current read callback
 * (expect_preface or expect_default) consume one unit at a time.
 *
 * Stops when more input is needed (H2O_HTTP2_ERROR_INCOMPLETE) or when the
 * connection enters a closing state.  On any other error, sends a GOAWAY frame
 * carrying the error code (unless the error demands immediate closure) and
 * closes the connection, returning close_connection()'s result.  Returns 0
 * otherwise. */
static int parse_input(h2o_http2_conn_t *conn)
{
    /* handle the input */
    while (conn->state < H2O_HTTP2_CONN_STATE_IS_CLOSING && conn->sock->input->size != 0) {
        /* process a frame */
        const char *err_desc = NULL;
        ssize_t ret = conn->_read_expect(conn, (uint8_t *)conn->sock->input->bytes, conn->sock->input->size, &err_desc);
        if (ret == H2O_HTTP2_ERROR_INCOMPLETE) {
            break; /* not enough bytes buffered yet; wait for the next read event */
        } else if (ret < 0) {
            if (ret != H2O_HTTP2_ERROR_PROTOCOL_CLOSE_IMMEDIATELY) {
                /* tell the peer why the connection is being shut down */
                enqueue_goaway(conn, (int)ret,
                               err_desc != NULL ? (h2o_iovec_t){(char *)err_desc, strlen(err_desc)} : (h2o_iovec_t){NULL});
            }
            return close_connection(conn);
        }
        /* advance to the next frame */
        h2o_buffer_consume(&conn->sock->input, ret);
    }
    return 0;
}
/* Socket read callback: parses newly received bytes, refreshes the idle timer,
 * and — when a write was pending on the delayed-write timer (i.e. no write is
 * currently in flight) — flushes the output immediately instead of waiting for
 * the timer to fire. */
static void on_read(h2o_socket_t *sock, const char *err)
{
    h2o_http2_conn_t *conn = sock->data;
    if (err != NULL) {
        /* read failed or the peer closed; tear the connection down */
        conn->super.ctx->http2.events.read_closed++;
        h2o_socket_read_stop(conn->sock);
        close_connection(conn);
        return;
    }
    if (parse_input(conn) != 0)
        return; /* an error occurred and the connection is being torn down */
    update_idle_timeout(conn);
    /* write immediately, if there is no write in flight and if pending write exists */
    if (h2o_timeout_is_linked(&conn->_write.timeout_entry)) {
        h2o_timeout_unlink(&conn->_write.timeout_entry);
        do_emit_writereq(conn);
    }
}
/* Completion callback for an HTTP/1 -> HTTP/2 upgrade.  Takes ownership of the
 * socket, starts HTTP/2 frame processing, runs the request that triggered the
 * upgrade (opened as stream 1 by h2o_http2_handle_upgrade), and replays any
 * bytes that were received beyond the original HTTP/1 request as HTTP/2 input. */
static void on_upgrade_complete(void *_conn, h2o_socket_t *sock, size_t reqsize)
{
    h2o_http2_conn_t *conn = _conn;
    if (sock == NULL) {
        /* the upgrade failed; release the connection */
        close_connection(conn);
        return;
    }
    conn->sock = sock;
    sock->data = conn;
    /* keep the HTTP/1 input buffer; it may still hold pipelined HTTP/2 bytes */
    conn->_http1_req_input = sock->input;
    h2o_buffer_init(&sock->input, &h2o_socket_buffer_prototype);
    /* setup inbound */
    h2o_socket_read_start(conn->sock, on_read);
    /* handle the request */
    execute_or_enqueue_request(conn, h2o_http2_conn_get_stream(conn, 1));
    if (conn->_http1_req_input->size > reqsize) {
        /* copy the bytes that followed the upgrade request into the fresh input
         * buffer and process them as HTTP/2 frames */
        size_t remaining_bytes = conn->_http1_req_input->size - reqsize;
        h2o_buffer_reserve(&sock->input, remaining_bytes);
        memcpy(sock->input->bytes, conn->_http1_req_input->bytes + reqsize, remaining_bytes);
        sock->input->size += remaining_bytes;
        on_read(conn->sock, NULL);
    }
}
/* Requests that buffered output be flushed; a no-op once the connection has
 * entered the closing state. */
void h2o_http2_conn_request_write(h2o_http2_conn_t *conn)
{
    if (conn->state != H2O_HTTP2_CONN_STATE_IS_CLOSING)
        request_gathered_write(conn);
}
/* Arranges for `stream` to make progress once output can be written.
 *
 * Streams that still have data to send (or have reached the final send state)
 * are re-activated in the scheduler — but only while their per-stream
 * flow-control window is open; streams with nothing left to send are parked on
 * `streams_to_proceed` so their proceed callback fires after the current write
 * completes (see on_write_complete). */
void h2o_http2_conn_register_for_proceed_callback(h2o_http2_conn_t *conn, h2o_http2_stream_t *stream)
{
    h2o_http2_conn_request_write(conn);
    if (h2o_http2_stream_has_pending_data(stream) || stream->state >= H2O_HTTP2_STREAM_STATE_SEND_BODY_IS_FINAL) {
        if (h2o_http2_window_get_window(&stream->output_window) > 0) {
            assert(!h2o_linklist_is_linked(&stream->_refs.link));
            h2o_http2_scheduler_activate(&stream->_refs.scheduler);
        }
    } else {
        h2o_linklist_insert(&conn->_write.streams_to_proceed, &stream->_refs.link);
    }
}
/* Write-notification callback: emits the next batch of output, or destroys the
 * connection immediately if the socket reported an error. */
static void on_notify_write(h2o_socket_t *sock, const char *err)
{
    h2o_http2_conn_t *conn = sock->data;
    if (err == NULL) {
        do_emit_writereq(conn);
    } else {
        close_connection_now(conn);
    }
}
/* Completion callback for the asynchronous write of `_write.buf_in_flight`.
 *
 * On error the connection is destroyed immediately.  Otherwise the in-flight
 * buffer is released, the proceed callbacks of fully flushed streams are run,
 * the idle timer is refreshed, and further output is either deferred via
 * h2o_socket_notify_write (non-libuv backends; presumably fires when the
 * socket becomes writable again — TODO confirm) or emitted directly. */
static void on_write_complete(h2o_socket_t *sock, const char *err)
{
    h2o_http2_conn_t *conn = sock->data;
    assert(conn->_write.buf_in_flight != NULL);
    /* close by error if necessary */
    if (err != NULL) {
        conn->super.ctx->http2.events.write_closed++;
        close_connection_now(conn);
        return;
    }
    /* reset the other memory pool */
    h2o_buffer_dispose(&conn->_write.buf_in_flight);
    assert(conn->_write.buf_in_flight == NULL);
    /* call the proceed callback of the streams that have been flushed (while unlinking them from the list) */
    if (conn->state < H2O_HTTP2_CONN_STATE_IS_CLOSING) {
        while (!h2o_linklist_is_empty(&conn->_write.streams_to_proceed)) {
            h2o_http2_stream_t *stream =
                H2O_STRUCT_FROM_MEMBER(h2o_http2_stream_t, _refs.link, conn->_write.streams_to_proceed.next);
            assert(!h2o_http2_stream_has_pending_data(stream));
            h2o_linklist_unlink(&stream->_refs.link);
            h2o_http2_stream_proceed(conn, stream);
        }
    }
    /* update the timeout now that the states have been updated */
    update_idle_timeout(conn);
    /* cancel the write callback if scheduled (as the generator may have scheduled a write just before this function gets called) */
    if (h2o_timeout_is_linked(&conn->_write.timeout_entry))
        h2o_timeout_unlink(&conn->_write.timeout_entry);
#if !H2O_USE_LIBUV
    if (conn->state == H2O_HTTP2_CONN_STATE_OPEN) {
        if (conn->_write.buf->size != 0 || h2o_http2_scheduler_is_active(&conn->scheduler))
            h2o_socket_notify_write(sock, on_notify_write);
        return;
    }
#endif
    /* write more, if possible */
    do_emit_writereq(conn);
}
/* Per-stream callback invoked by the scheduler while gathering DATA frames
 * (see do_emit_writereq).
 *
 * Emits the stream's pending data, keeps the stream active in the scheduler
 * only while it still has data and an open flow-control window, and parks
 * fully drained streams on `streams_to_proceed`.  Returns non-zero to stop
 * the scheduler run once the connection output buffer window is exhausted. */
static int emit_writereq_of_openref(h2o_http2_scheduler_openref_t *ref, int *still_is_active, void *cb_arg)
{
    h2o_http2_conn_t *conn = cb_arg;
    h2o_http2_stream_t *stream = H2O_STRUCT_FROM_MEMBER(h2o_http2_stream_t, _refs.scheduler, ref);
    assert(h2o_http2_stream_has_pending_data(stream) || stream->state >= H2O_HTTP2_STREAM_STATE_SEND_BODY_IS_FINAL);
    *still_is_active = 0;
    h2o_http2_stream_send_pending_data(conn, stream);
    if (h2o_http2_stream_has_pending_data(stream)) {
        if (h2o_http2_window_get_window(&stream->output_window) <= 0) {
            /* is blocked (flow-control window closed); stays inactive until a WINDOW_UPDATE arrives */
        } else {
            *still_is_active = 1;
        }
    } else {
        h2o_linklist_insert(&conn->_write.streams_to_proceed, &stream->_refs.link);
    }
    return h2o_http2_conn_get_buffer_window(conn) > 0 ? 0 : -1;
}
/* Gathers pending DATA frames into the output buffer (via the scheduler),
 * starts an asynchronous write if anything is buffered, then drives the
 * connection-state machine: a half-closed connection with no remaining
 * half-closed streams transitions to closing, and a closing connection is
 * destroyed.  Must not be called while a write is already in flight. */
void do_emit_writereq(h2o_http2_conn_t *conn)
{
    assert(conn->_write.buf_in_flight == NULL);
    /* push DATA frames */
    if (conn->state < H2O_HTTP2_CONN_STATE_IS_CLOSING && h2o_http2_conn_get_buffer_window(conn) > 0)
        h2o_http2_scheduler_run(&conn->scheduler, emit_writereq_of_openref, conn);
    if (conn->_write.buf->size != 0) {
        /* write and wait for completion; swap in a fresh buffer so frame
         * generation can continue while the write is in flight */
        h2o_iovec_t buf = {conn->_write.buf->bytes, conn->_write.buf->size};
        h2o_socket_write(conn->sock, &buf, 1, on_write_complete);
        conn->_write.buf_in_flight = conn->_write.buf;
        h2o_buffer_init(&conn->_write.buf, &wbuf_buffer_prototype);
        update_idle_timeout(conn);
    }
    /* close the connection if necessary */
    switch (conn->state) {
    case H2O_HTTP2_CONN_STATE_OPEN:
        break;
    case H2O_HTTP2_CONN_STATE_HALF_CLOSED:
        if (conn->num_streams.pull.half_closed + conn->num_streams.push.half_closed != 0)
            break;
        conn->state = H2O_HTTP2_CONN_STATE_IS_CLOSING;
        /* fall-thru */
    case H2O_HTTP2_CONN_STATE_IS_CLOSING:
        close_connection_now(conn);
        break;
    }
}
/* Delayed-write timer callback: recovers the connection from its embedded
 * timeout entry and flushes pending output. */
static void emit_writereq(h2o_timeout_entry_t *entry)
{
    do_emit_writereq(H2O_STRUCT_FROM_MEMBER(h2o_http2_conn_t, _write.timeout_entry, entry));
}
/* h2o_conn_t callback: retrieves the local address of the underlying socket. */
static socklen_t get_sockname(h2o_conn_t *_conn, struct sockaddr *sa)
{
    return h2o_socket_getsockname(((h2o_http2_conn_t *)_conn)->sock, sa);
}
/* h2o_conn_t callback: retrieves the peer address of the underlying socket. */
static socklen_t get_peername(h2o_conn_t *_conn, struct sockaddr *sa)
{
    return h2o_socket_getpeername(((h2o_http2_conn_t *)_conn)->sock, sa);
}
/* h2o_conn_t callback: exposes the underlying socket of the connection. */
static h2o_socket_t *get_socket(h2o_conn_t *_conn)
{
    return ((h2o_http2_conn_t *)_conn)->sock;
}
/* Generates per-request log callbacks that expose TLS attributes of the
 * underlying socket by delegating to the matching h2o_socket_log_ssl_* helper. */
#define DEFINE_TLS_LOGGER(name)                                                                                                    \
    static h2o_iovec_t log_##name(h2o_req_t *req)                                                                                  \
    {                                                                                                                              \
        h2o_http2_conn_t *conn = (void *)req->conn;                                                                                \
        return h2o_socket_log_ssl_##name(conn->sock, &req->pool);                                                                  \
    }

/* one logger per TLS attribute; registered in create_conn's callback table */
DEFINE_TLS_LOGGER(protocol_version)
DEFINE_TLS_LOGGER(session_reused)
DEFINE_TLS_LOGGER(cipher)
DEFINE_TLS_LOGGER(cipher_bits)
DEFINE_TLS_LOGGER(session_id)

#undef DEFINE_TLS_LOGGER
/* Log callback: renders the HTTP/2 stream id of the request as a decimal string. */
static h2o_iovec_t log_stream_id(h2o_req_t *req)
{
    h2o_http2_stream_t *stream = H2O_STRUCT_FROM_MEMBER(h2o_http2_stream_t, req, req);
    char *buf = h2o_mem_alloc_pool(&stream->req.pool, sizeof(H2O_UINT32_LONGEST_STR));
    return h2o_iovec_init(buf, (size_t)sprintf(buf, "%" PRIu32, stream->stream_id));
}
/* Log callback: renders the client-advertised priority of the stream in the
 * form "<exclusive>:<dependency>:<weight>". */
static h2o_iovec_t log_priority_received(h2o_req_t *req)
{
    h2o_http2_stream_t *stream = H2O_STRUCT_FROM_MEMBER(h2o_http2_stream_t, req, req);
    char *buf = h2o_mem_alloc_pool(&stream->req.pool, sizeof("1:" H2O_UINT32_LONGEST_STR ":" H2O_UINT16_LONGEST_STR));
    int len = sprintf(buf, "%c:%" PRIu32 ":%" PRIu16, stream->received_priority.exclusive ? '1' : '0',
                      stream->received_priority.dependency, stream->received_priority.weight);
    return h2o_iovec_init(buf, (size_t)len);
}
/* Log callback: whether the client requested an exclusive dependency ("1"/"0"). */
static h2o_iovec_t log_priority_received_exclusive(h2o_req_t *req)
{
    h2o_http2_stream_t *stream = H2O_STRUCT_FROM_MEMBER(h2o_http2_stream_t, req, req);
    char *flag = stream->received_priority.exclusive ? "1" : "0";
    return h2o_iovec_init(flag, 1);
}
/* Log callback: the stream id the client declared as dependency parent. */
static h2o_iovec_t log_priority_received_parent(h2o_req_t *req)
{
    h2o_http2_stream_t *stream = H2O_STRUCT_FROM_MEMBER(h2o_http2_stream_t, req, req);
    char *buf = h2o_mem_alloc_pool(&stream->req.pool, sizeof(H2O_UINT32_LONGEST_STR));
    return h2o_iovec_init(buf, (size_t)sprintf(buf, "%" PRIu32, stream->received_priority.dependency));
}
/* Log callback: the priority weight the client declared for the stream. */
static h2o_iovec_t log_priority_received_weight(h2o_req_t *req)
{
    h2o_http2_stream_t *stream = H2O_STRUCT_FROM_MEMBER(h2o_http2_stream_t, req, req);
    char *buf = h2o_mem_alloc_pool(&stream->req.pool, sizeof(H2O_UINT16_LONGEST_STR));
    return h2o_iovec_init(buf, (size_t)sprintf(buf, "%" PRIu16, stream->received_priority.weight));
}
/* Returns the stream id of the scheduler parent of `stream`, or 0 when the
 * stream depends directly on the connection root. */
static uint32_t get_parent_stream_id(h2o_http2_conn_t *conn, h2o_http2_stream_t *stream)
{
    h2o_http2_scheduler_node_t *parent_sched = h2o_http2_scheduler_get_parent(&stream->_refs.scheduler);
    if (parent_sched != &conn->scheduler)
        return H2O_STRUCT_FROM_MEMBER(h2o_http2_stream_t, _refs.scheduler, parent_sched)->stream_id;
    return 0;
}
/* Log callback: the effective scheduling state as "<parent-stream-id>:<weight>". */
static h2o_iovec_t log_priority_actual(h2o_req_t *req)
{
    h2o_http2_conn_t *conn = (void *)req->conn;
    h2o_http2_stream_t *stream = H2O_STRUCT_FROM_MEMBER(h2o_http2_stream_t, req, req);
    char *buf = h2o_mem_alloc_pool(&stream->req.pool, sizeof(H2O_UINT32_LONGEST_STR ":" H2O_UINT16_LONGEST_STR));
    int len = sprintf(buf, "%" PRIu32 ":%" PRIu16, get_parent_stream_id(conn, stream),
                      h2o_http2_scheduler_get_weight(&stream->_refs.scheduler));
    return h2o_iovec_init(buf, (size_t)len);
}
/* Log callback: the stream id of the effective scheduler parent (0 = root). */
static h2o_iovec_t log_priority_actual_parent(h2o_req_t *req)
{
    h2o_http2_conn_t *conn = (void *)req->conn;
    h2o_http2_stream_t *stream = H2O_STRUCT_FROM_MEMBER(h2o_http2_stream_t, req, req);
    char *buf = h2o_mem_alloc_pool(&stream->req.pool, sizeof(H2O_UINT32_LONGEST_STR));
    return h2o_iovec_init(buf, (size_t)sprintf(buf, "%" PRIu32, get_parent_stream_id(conn, stream)));
}
/* Log callback: the effective scheduling weight of the stream. */
static h2o_iovec_t log_priority_actual_weight(h2o_req_t *req)
{
    h2o_http2_stream_t *stream = H2O_STRUCT_FROM_MEMBER(h2o_http2_stream_t, req, req);
    char *buf = h2o_mem_alloc_pool(&stream->req.pool, sizeof(H2O_UINT16_LONGEST_STR));
    return h2o_iovec_init(buf, (size_t)sprintf(buf, "%" PRIu16, h2o_http2_scheduler_get_weight(&stream->_refs.scheduler)));
}
/* Allocates and initializes a new HTTP/2 connection object bound to `sock`.
 *
 * The HTTP/2-specific part of the structure is zero-initialized first, then
 * the peer settings, stream map, scheduler, flow-control windows, HPACK table
 * capacities and write machinery are set up.  The parser starts in
 * expect_preface, awaiting the client connection preface. */
static h2o_http2_conn_t *create_conn(h2o_context_t *ctx, h2o_hostconf_t **hosts, h2o_socket_t *sock, struct timeval connected_at)
{
    static const h2o_conn_callbacks_t callbacks = {
        get_sockname, /* stringify address */
        get_peername, /* ditto */
        push_path, /* HTTP2 push */
        get_socket, /* get underlying socket */
        h2o_http2_get_debug_state, /* get debug state */
        {{
            {log_protocol_version, log_session_reused, log_cipher, log_cipher_bits, log_session_id}, /* ssl */
            {NULL}, /* http1 */
            {log_stream_id, log_priority_received, log_priority_received_exclusive, log_priority_received_parent,
             log_priority_received_weight, log_priority_actual, log_priority_actual_parent, log_priority_actual_weight} /* http2 */
        }} /* loggers */
    };
    h2o_http2_conn_t *conn = (void *)h2o_create_connection(sizeof(*conn), ctx, hosts, connected_at, &callbacks);
    /* zero everything beyond the embedded h2o_conn_t, which h2o_create_connection has already initialized */
    memset((char *)conn + sizeof(conn->super), 0, sizeof(*conn) - sizeof(conn->super));
    conn->sock = sock;
    conn->peer_settings = H2O_HTTP2_SETTINGS_DEFAULT;
    conn->streams = kh_init(h2o_http2_stream_t);
    h2o_http2_scheduler_init(&conn->scheduler);
    conn->state = H2O_HTTP2_CONN_STATE_OPEN;
    h2o_linklist_insert(&ctx->http2._conns, &conn->_conns);
    conn->_read_expect = expect_preface;
    conn->_input_header_table.hpack_capacity = conn->_input_header_table.hpack_max_capacity =
        H2O_HTTP2_SETTINGS_DEFAULT.header_table_size;
    h2o_http2_window_init(&conn->_input_window, &H2O_HTTP2_SETTINGS_DEFAULT);
    conn->_output_header_table.hpack_capacity = H2O_HTTP2_SETTINGS_HOST.header_table_size;
    h2o_linklist_init_anchor(&conn->_pending_reqs);
    h2o_buffer_init(&conn->_write.buf, &wbuf_buffer_prototype);
    h2o_linklist_init_anchor(&conn->_write.streams_to_proceed);
    conn->_write.timeout_entry.cb = emit_writereq;
    h2o_http2_window_init(&conn->_write.window, &conn->peer_settings);
    return conn;
}
/* Records that `abspath` (qualified by the scheme and authority of `src_req`)
 * has been pushed on this connection, lazily creating the memo cache.
 *
 * h2o_cache_set appears to return non-zero when the entry already existed —
 * i.e. the same URL was already pushed (see the early-return in push_path) —
 * TODO confirm against the h2o_cache_set contract.  Only a hash of the URL is
 * stored, so distinct URLs may in rare cases collide. */
static int update_push_memo(h2o_http2_conn_t *conn, h2o_req_t *src_req, const char *abspath, size_t abspath_len)
{
    if (conn->push_memo == NULL)
        conn->push_memo = h2o_cache_create(0, 1024, 1, NULL);
    /* uses the hash as the key */
    h2o_cache_hashcode_t url_hash = h2o_cache_calchash(src_req->input.scheme->name.base, src_req->input.scheme->name.len) ^
                                    h2o_cache_calchash(src_req->input.authority.base, src_req->input.authority.len) ^
                                    h2o_cache_calchash(abspath, abspath_len);
    return h2o_cache_set(conn->push_memo, 0, h2o_iovec_init(&url_hash, sizeof(url_hash)), url_hash, h2o_iovec_init(NULL, 0));
}
/* Initiates a server push of `abspath` in response to `src_req`.
 *
 * The push is silently skipped when any precondition fails: pushes may only
 * originate from client-initiated (pull) streams; push must be enabled both in
 * configuration and by the peer; the peer's concurrency limit and the stream-id
 * space must not be exhausted; requests must currently be runnable with none
 * pending; the request must not carry X-Forwarded-For; and the URL must not be
 * reported fresh by cache digests or already pushed on this connection. */
static void push_path(h2o_req_t *src_req, const char *abspath, size_t abspath_len, int is_critical)
{
    h2o_http2_conn_t *conn = (void *)src_req->conn;
    h2o_http2_stream_t *src_stream = H2O_STRUCT_FROM_MEMBER(h2o_http2_stream_t, req, src_req);
    /* RFC 7540 8.2.1: PUSH_PROMISE frames can be sent by the server in response to any client-initiated stream */
    if (h2o_http2_stream_is_push(src_stream->stream_id))
        return;
    /* push must be enabled by our configuration and by the peer, and within the peer's concurrency limit */
    if (!src_stream->req.hostconf->http2.push_preload || !conn->peer_settings.enable_push ||
        conn->num_streams.push.open >= conn->peer_settings.max_concurrent_streams)
        return;
    /* stop pushing when the 31-bit stream-id space is close to exhaustion */
    if (conn->push_stream_ids.max_open >= 0x7ffffff0)
        return;
    /* only push while the connection can run requests and none are queued */
    if (!(h2o_linklist_is_empty(&conn->_pending_reqs) && can_run_requests(conn)))
        return;
    /* do not push for forwarded (proxied) requests */
    if (h2o_find_header(&src_stream->req.headers, H2O_TOKEN_X_FORWARDED_FOR, -1) != -1)
        return;
    /* skip the push when the client's cache digest reports the URL as fresh */
    if (src_stream->cache_digests != NULL) {
        h2o_iovec_t url = h2o_concat(&src_stream->req.pool, src_stream->req.input.scheme->name, h2o_iovec_init(H2O_STRLIT("://")),
                                     src_stream->req.input.authority, h2o_iovec_init(abspath, abspath_len));
        if (h2o_cache_digests_lookup_by_url(src_stream->cache_digests, url.base, url.len) == H2O_CACHE_DIGESTS_STATE_FRESH)
            return;
    }
    /* delayed initialization of casper (cookie-based), that MAY be used together to cache-digests */
    if (src_stream->req.hostconf->http2.casper.capacity_bits != 0) {
        if (!src_stream->pull.casper_is_ready) {
            src_stream->pull.casper_is_ready = 1;
            if (conn->casper == NULL)
                h2o_http2_conn_init_casper(conn, src_stream->req.hostconf->http2.casper.capacity_bits);
            /* feed every Cookie header of the triggering request into casper */
            ssize_t header_index;
            for (header_index = -1;
                 (header_index = h2o_find_header(&src_stream->req.headers, H2O_TOKEN_COOKIE, header_index)) != -1;) {
                h2o_header_t *header = src_stream->req.headers.entries + header_index;
                h2o_http2_casper_consume_cookie(conn->casper, header->value.base, header->value.len);
            }
        }
    }
    /* update the push memo, and if it already pushed on the same connection, return */
    if (update_push_memo(conn, &src_stream->req, abspath, abspath_len))
        return;
    /* open the stream (push stream ids are even; next id is max_open + 2) */
    h2o_http2_stream_t *stream = h2o_http2_stream_open(conn, conn->push_stream_ids.max_open + 2, NULL, &h2o_http2_default_priority);
    stream->received_priority.dependency = src_stream->stream_id;
    stream->push.parent_stream_id = src_stream->stream_id;
    /* critical assets get a very high weight (257) at the connection root; others depend on the parent stream */
    if (is_critical) {
        h2o_http2_scheduler_open(&stream->_refs.scheduler, &conn->scheduler, 257, 0);
    } else {
        h2o_http2_scheduler_open(&stream->_refs.scheduler, &src_stream->_refs.scheduler.node, 16, 0);
    }
    h2o_http2_stream_prepare_for_request(conn, stream);
    /* setup request: a synthetic GET for the pushed path, inheriting scheme and authority */
    stream->req.input.method = (h2o_iovec_t){H2O_STRLIT("GET")};
    stream->req.input.scheme = src_stream->req.input.scheme;
    stream->req.input.authority =
        h2o_strdup(&stream->req.pool, src_stream->req.input.authority.base, src_stream->req.input.authority.len);
    stream->req.input.path = h2o_strdup(&stream->req.pool, abspath, abspath_len);
    stream->req.version = 0x200;
    { /* copy headers that may affect the response (of a cacheable response) */
        size_t i;
        for (i = 0; i != src_stream->req.headers.size; ++i) {
            h2o_header_t *src_header = src_stream->req.headers.entries + i;
            if (h2o_iovec_is_token(src_header->name)) {
                h2o_token_t *token = H2O_STRUCT_FROM_MEMBER(h2o_token_t, buf, src_header->name);
                if (token->copy_for_push_request) {
                    h2o_add_header(&stream->req.pool, &stream->req.headers, token, NULL,
                                   h2o_strdup(&stream->req.pool, src_header->value.base, src_header->value.len).base,
                                   src_header->value.len);
                }
            }
        }
    }
    execute_or_enqueue_request(conn, stream);
    /* send push-promise ASAP (before the parent stream gets closed), even if execute_or_enqueue_request did not trigger the
     * invocation of send_headers */
    if (!stream->push.promise_sent && stream->state != H2O_HTTP2_STREAM_STATE_END_STREAM)
        h2o_http2_stream_send_push_promise(conn, stream);
}
/* Applies `cb` to every in-flight request of every HTTP/2 connection of `ctx`.
 * Iteration stops early, returning the callback's value, as soon as a callback
 * returns non-zero; otherwise returns 0. */
static int foreach_request(h2o_context_t *ctx, int (*cb)(h2o_req_t *req, void *cbdata), void *cbdata)
{
    h2o_linklist_t *node;
    for (node = ctx->http2._conns.next; node != &ctx->http2._conns; node = node->next) {
        h2o_http2_conn_t *conn = H2O_STRUCT_FROM_MEMBER(h2o_http2_conn_t, _conns, node);
        h2o_http2_stream_t *stream;
        kh_foreach_value(conn->streams, stream, {
            int ret = cb(&stream->req, cbdata);
            if (ret != 0)
                return ret;
        });
    }
    return 0;
}
/* Starts serving an accepted socket as an HTTP/2 connection: creates the
 * connection object, begins reading, arms the idle timer, and immediately
 * processes any bytes that were already buffered on the socket. */
void h2o_http2_accept(h2o_accept_ctx_t *ctx, h2o_socket_t *sock, struct timeval connected_at)
{
    h2o_http2_conn_t *conn = create_conn(ctx->ctx, ctx->hosts, sock, connected_at);
    sock->data = conn;
    h2o_socket_read_start(conn->sock, on_read);
    update_idle_timeout(conn);
    if (sock->input->size != 0)
        on_read(sock, NULL); /* NULL (not the integer literal 0) for the `const char *err` argument,
                              * matching the on_read(conn->sock, NULL) call in on_upgrade_complete */
}
/* Handles an HTTP/1.1 "Upgrade: h2c" request.
 *
 * Validates the upgrade headers (Connection must list HTTP2-Settings; the
 * HTTP2-Settings header must be present, base64url-decodable, and a valid
 * SETTINGS payload), then opens stream 1 for the request and responds with
 * "101 Switching Protocols" followed by the server preface.  Returns 0 on
 * success; on failure the partially initialized connection is released and
 * -1 is returned (presumably letting the caller fall back to HTTP/1 —
 * TODO confirm).
 *
 * NOTE(review): the Error path frees the connection without disposing
 * _write.buf initialized by create_conn; this appears safe only if
 * h2o_buffer_init does not allocate — confirm. */
int h2o_http2_handle_upgrade(h2o_req_t *req, struct timeval connected_at)
{
    h2o_http2_conn_t *http2conn = create_conn(req->conn->ctx, req->conn->hosts, NULL, connected_at);
    h2o_http2_stream_t *stream;
    ssize_t connection_index, settings_index;
    h2o_iovec_t settings_decoded;
    const char *err_desc;
    assert(req->version < 0x200); /* from HTTP/1.x */
    /* check that "HTTP2-Settings" is declared in the connection header */
    connection_index = h2o_find_header(&req->headers, H2O_TOKEN_CONNECTION, -1);
    assert(connection_index != -1);
    if (!h2o_contains_token(req->headers.entries[connection_index].value.base, req->headers.entries[connection_index].value.len,
                            H2O_STRLIT("http2-settings"), ',')) {
        goto Error;
    }
    /* decode the settings */
    if ((settings_index = h2o_find_header(&req->headers, H2O_TOKEN_HTTP2_SETTINGS, -1)) == -1) {
        goto Error;
    }
    if ((settings_decoded = h2o_decode_base64url(&req->pool, req->headers.entries[settings_index].value.base,
                                                 req->headers.entries[settings_index].value.len))
            .base == NULL) {
        goto Error;
    }
    if (h2o_http2_update_peer_settings(&http2conn->peer_settings, (uint8_t *)settings_decoded.base, settings_decoded.len,
                                       &err_desc) != 0) {
        goto Error;
    }
    /* open the stream, now that the function is guaranteed to succeed */
    stream = h2o_http2_stream_open(http2conn, 1, req, &h2o_http2_default_priority);
    h2o_http2_scheduler_open(&stream->_refs.scheduler, &http2conn->scheduler, h2o_http2_default_priority.weight, 0);
    h2o_http2_stream_prepare_for_request(http2conn, stream);
    /* send response */
    req->res.status = 101;
    req->res.reason = "Switching Protocols";
    h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_UPGRADE, NULL, H2O_STRLIT("h2c"));
    h2o_http1_upgrade(req, (h2o_iovec_t *)&SERVER_PREFACE, 1, on_upgrade_complete, http2conn);
    return 0;
Error:
    /* tear down the half-built connection: unlink it, drop the stream map, free it */
    h2o_linklist_unlink(&http2conn->_conns);
    kh_destroy(h2o_http2_stream_t, http2conn->streams);
    free(http2conn);
    return -1;
}
| 1 | 12,782 | Could we use `0x7ffffffe` instead, since `INT32_MAX` is an ID of a _pull_ stream? Other than that the PR seems ready for merge. Thank you for working on the fix. | h2o-h2o | c |
@@ -86,11 +86,11 @@ module Beaker
execute_rake_task("beaker_quickstart:gen_hosts[vmpooler]")
end
- # Print a message to the console and exit with 0
+ # Print a message to the console and exit with 1
# @param [String] msg the message to print
- def self.exit_with(msg)
+ def self.exit_with(msg, exit_code=1)
puts msg
- exit(0)
+ exit(exit_code)
end
# Call the quick start task for the specified hypervisor | 1 | require 'rake'
require 'stringio'
module Beaker
  module Subcommands
    # Methods used in execution of Subcommands
    # - should we execute a subcommand?
    # - reset ARGV
    # - execute Beaker
    # - update a rakefile to require beaker quickstart tasks
    # - initialise a rake application
    # - execute a rake task
    # - execute the vagrant quickstart task
    # - execute the vmpooler quickstart task
    # - exit with a specific message
    # - execute the quick start task for the specified hypervisor
    # - capture stdout and stderr
    module SubcommandUtil
      # Line appended to the Rakefile so the beaker quick-start tasks are loadable
      BEAKER_REQUIRE = "require 'beaker/tasks/quick_start'"
      # Hypervisors supported by the quick-start tasks
      HYPERVISORS = ["vagrant", "vmpooler"]
      # Check if the first argument to the beaker execution is a subcommand
      # @param [String, nil] arg0 the first command-line argument (may be nil)
      # @return [Boolean] true if arg0 is "help" or a method defined in the Subcommands class, false otherwise
      def self.execute_subcommand?(arg0)
        return false if arg0.nil?
        (Beaker::Subcommand.instance_methods(false) << :help).include? arg0.to_sym
      end
      # Reset ARGV to contain the arguments determined by a specific subcommand
      # @param [Array<String>] args the arguments determined by a specific subcommand
      def self.reset_argv(args)
        ARGV.clear
        args.each do |arg|
          ARGV << arg
        end
      end
      # Update ARGV and call Beaker
      # @param [Array<String>] args the arguments determined by a specific subcommand
      def self.execute_beaker(*args)
        reset_argv(args)
        Beaker::CLI.new.execute!
      end
      # Determines what Rakefile to use, falling back to "Rakefile" when rake
      # cannot locate one
      # NOTE(review): find_rakefile_location is invoked twice when a Rakefile
      # exists; consider caching the result
      # @return [String] the name of the rakefile to use
      def self.determine_rake_file()
        rake_app.find_rakefile_location() ? rake_app.find_rakefile_location()[0] : "Rakefile"
      end
      # Check for the presence of a Rakefile containing the require of the
      # quick start tasks; creates the file if absent and appends the require
      # line when it is missing
      def self.require_tasks()
        rake_file = determine_rake_file()
        FileUtils.touch(rake_file)
        unless File.readlines(rake_file).grep(/#{BEAKER_REQUIRE}/).any?
          File.open(rake_file, "a+") { |f| f.puts(BEAKER_REQUIRE) }
        end
      end
      # Initialises (and memoises) a rake application; ARGV is cleared first so
      # rake does not try to interpret beaker's own command-line arguments
      # @return [Object] a rake application
      def self.rake_app()
        unless @rake_app
          ARGV.clear
          @rake_app = Rake.application
          @rake_app.init
        end
        @rake_app
      end
      # Load the Rakefile and execute a Rake task, capturing its output
      # @param [String] task the rake task to execute
      def self.execute_rake_task(task)
        rake_app.load_rakefile()
        with_captured_output { rake_app.invoke_task(task) }
      end
      # Execute the quick start task for vagrant
      def self.init_vagrant()
        execute_rake_task("beaker_quickstart:gen_hosts[vagrant]")
      end
      # Execute the quick start task for vmpooler
      def self.init_vmpooler()
        execute_rake_task("beaker_quickstart:gen_hosts[vmpooler]")
      end
      # Print a message to the console and exit with status 0
      # @param [String] msg the message to print
      def self.exit_with(msg)
        puts msg
        exit(0)
      end
      # Call the quick start task for the specified hypervisor; hypervisors not
      # listed in HYPERVISORS are silently ignored
      # @param [String] hypervisor the hypervisor we want to query
      def self.init_hypervisor(hypervisor)
        case hypervisor
        when "vagrant"
          init_vagrant
        when "vmpooler"
          init_vmpooler
        end
      end
      # Execute a task but capture stdout and stderr to a buffer, restoring the
      # original streams afterwards even if the block raises
      def self.with_captured_output
        begin
          old_stdout = $stdout.clone
          old_stderr = $stderr.clone
          $stdout = StringIO.new
          $stderr = StringIO.new
          yield
        ensure
          $stdout = old_stdout
          $stderr = old_stderr
        end
      end
    end
  end
end
| 1 | 14,533 | Might as well add a docstring for the new `exit_code` parameter too. | voxpupuli-beaker | rb |
@@ -77,8 +77,8 @@ public class LocalNewSessionQueuer extends NewSessionQueuer {
}
@Override
- public Optional<HttpRequest> remove() {
- return sessionRequests.poll();
+ public Optional<HttpRequest> remove(RequestId id) {
+ return sessionRequests.poll(id);
}
@Override | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.grid.sessionqueue.local;
import org.openqa.selenium.events.EventBus;
import org.openqa.selenium.grid.config.Config;
import org.openqa.selenium.grid.data.RequestId;
import org.openqa.selenium.grid.log.LoggingOptions;
import org.openqa.selenium.grid.server.EventBusOptions;
import org.openqa.selenium.grid.sessionqueue.GetNewSessionResponse;
import org.openqa.selenium.grid.sessionqueue.NewSessionQueue;
import org.openqa.selenium.grid.sessionqueue.NewSessionQueuer;
import org.openqa.selenium.grid.sessionqueue.config.NewSessionQueueOptions;
import org.openqa.selenium.internal.Require;
import org.openqa.selenium.remote.http.HttpRequest;
import org.openqa.selenium.remote.http.HttpResponse;
import org.openqa.selenium.remote.tracing.Tracer;
import java.time.Duration;
import java.util.Optional;
import java.util.logging.Logger;
public class LocalNewSessionQueuer extends NewSessionQueuer {
private static final Logger LOG = Logger.getLogger(LocalNewSessionQueuer.class.getName());
private final EventBus bus;
public final NewSessionQueue sessionRequests;
public LocalNewSessionQueuer(
Tracer tracer,
EventBus bus,
NewSessionQueue sessionRequests) {
super(tracer);
this.bus = Require.nonNull("Event bus", bus);
this.sessionRequests = Require.nonNull("New Session Request Queue", sessionRequests);
}
public static NewSessionQueuer create(Config config) {
Tracer tracer = new LoggingOptions(config).getTracer();
EventBus bus = new EventBusOptions(config).getEventBus();
Duration retryInterval = new NewSessionQueueOptions(config).getSessionRequestRetryInterval();
Duration requestTimeout = new NewSessionQueueOptions(config).getSessionRequestTimeout();
NewSessionQueue sessionRequests = new LocalNewSessionQueue(
tracer,
bus,
retryInterval,
requestTimeout);
return new LocalNewSessionQueuer(tracer, bus, sessionRequests);
}
@Override
public HttpResponse addToQueue(HttpRequest request) {
validateSessionRequest(request);
GetNewSessionResponse
getNewSessionResponse = new GetNewSessionResponse(tracer, bus, sessionRequests);
return getNewSessionResponse.add(request);
}
@Override
public boolean retryAddToQueue(HttpRequest request, RequestId reqId) {
return sessionRequests.offerFirst(request, reqId);
}
@Override
public Optional<HttpRequest> remove() {
return sessionRequests.poll();
}
@Override
public int clearQueue() {
return sessionRequests.clear();
}
@Override
public boolean isReady() {
return bus.isReady();
}
}
| 1 | 18,216 | `poll` and `remove` have different meanings. | SeleniumHQ-selenium | rb |
@@ -111,6 +111,15 @@ public class TableProperties {
public static final String DELETE_AVRO_COMPRESSION = "write.delete.avro.compression-codec";
public static final String AVRO_COMPRESSION_DEFAULT = "gzip";
+ public static final String PARQUET_BLOOM_FILTER_ENABLED = "write.parquet.bloom-filter-enabled";
+ public static final boolean PARQUET_BLOOM_FILTER_ENABLED_DEFAULT = false;
+
+ public static final String PARQUET_BLOOM_FILTER_EXPECTED_NDV = "write.parquet.bloom-filter-expected-ndv";
+ public static final long PARQUET_BLOOM_FILTER_EXPECTED_NDV_DEFAULT = -1L;
+
+ public static final String PARQUET_BLOOM_FILTER_MAX_BYTES = "write.parquet.bloom-filter-max-bytes";
+ public static final int PARQUET_BLOOM_FILTER_MAX_BYTES_DEFAULT = 1024 * 1024;
+
public static final String SPLIT_SIZE = "read.split.target-size";
public static final long SPLIT_SIZE_DEFAULT = 134217728; // 128 MB
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import java.util.Set;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet;
public class TableProperties {
  /** Private constructor: {@code TableProperties} is a constants holder and must not be instantiated. */
  private TableProperties() {
  }
/**
* Reserved table property for table format version.
* <p>
* Iceberg will default a new table's format version to the latest stable and recommended version.
* This reserved property keyword allows users to override the Iceberg format version of the table metadata.
* <p>
* If this table property exists when creating a table, the table will use the specified format version.
* If a table updates this property, it will try to upgrade to the specified format version.
* <p>
* Note: incomplete or unstable versions cannot be selected using this property.
*/
public static final String FORMAT_VERSION = "format-version";
/**
* Reserved Iceberg table properties list.
* <p>
* Reserved table properties are only used to control behaviors when creating or updating a table.
* The value of these properties are not persisted as a part of the table metadata.
*/
public static final Set<String> RESERVED_PROPERTIES = ImmutableSet.of(
FORMAT_VERSION
);
public static final String COMMIT_NUM_RETRIES = "commit.retry.num-retries";
public static final int COMMIT_NUM_RETRIES_DEFAULT = 4;
public static final String COMMIT_MIN_RETRY_WAIT_MS = "commit.retry.min-wait-ms";
public static final int COMMIT_MIN_RETRY_WAIT_MS_DEFAULT = 100;
public static final String COMMIT_MAX_RETRY_WAIT_MS = "commit.retry.max-wait-ms";
public static final int COMMIT_MAX_RETRY_WAIT_MS_DEFAULT = 60000; // 1 minute
public static final String COMMIT_TOTAL_RETRY_TIME_MS = "commit.retry.total-timeout-ms";
public static final int COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT = 1800000; // 30 minutes
public static final String COMMIT_NUM_STATUS_CHECKS = "commit.status-check.num-retries";
public static final int COMMIT_NUM_STATUS_CHECKS_DEFAULT = 3;
public static final String COMMIT_STATUS_CHECKS_MIN_WAIT_MS = "commit.status-check.min-wait-ms";
public static final long COMMIT_STATUS_CHECKS_MIN_WAIT_MS_DEFAULT = 1000L; // 1s
public static final String COMMIT_STATUS_CHECKS_MAX_WAIT_MS = "commit.status-check.max-wait-ms";
public static final long COMMIT_STATUS_CHECKS_MAX_WAIT_MS_DEFAULT = 60000L; // 1 minute
public static final String COMMIT_STATUS_CHECKS_TOTAL_WAIT_MS = "commit.status-check.total-timeout-ms";
public static final long COMMIT_STATUS_CHECKS_TOTAL_WAIT_MS_DEFAULT = 1800000; // 30 minutes
public static final String MANIFEST_TARGET_SIZE_BYTES = "commit.manifest.target-size-bytes";
public static final long MANIFEST_TARGET_SIZE_BYTES_DEFAULT = 8388608; // 8 MB
public static final String MANIFEST_MIN_MERGE_COUNT = "commit.manifest.min-count-to-merge";
public static final int MANIFEST_MIN_MERGE_COUNT_DEFAULT = 100;
public static final String MANIFEST_MERGE_ENABLED = "commit.manifest-merge.enabled";
public static final boolean MANIFEST_MERGE_ENABLED_DEFAULT = true;
public static final String DEFAULT_FILE_FORMAT = "write.format.default";
public static final String DELETE_DEFAULT_FILE_FORMAT = "write.delete.format.default";
public static final String DEFAULT_FILE_FORMAT_DEFAULT = "parquet";
public static final String PARQUET_ROW_GROUP_SIZE_BYTES = "write.parquet.row-group-size-bytes";
public static final String DELETE_PARQUET_ROW_GROUP_SIZE_BYTES = "write.delete.parquet.row-group-size-bytes";
public static final String PARQUET_ROW_GROUP_SIZE_BYTES_DEFAULT = "134217728"; // 128 MB
public static final String PARQUET_PAGE_SIZE_BYTES = "write.parquet.page-size-bytes";
public static final String DELETE_PARQUET_PAGE_SIZE_BYTES = "write.delete.parquet.page-size-bytes";
public static final String PARQUET_PAGE_SIZE_BYTES_DEFAULT = "1048576"; // 1 MB
public static final String PARQUET_DICT_SIZE_BYTES = "write.parquet.dict-size-bytes";
public static final String DELETE_PARQUET_DICT_SIZE_BYTES = "write.delete.parquet.dict-size-bytes";
public static final String PARQUET_DICT_SIZE_BYTES_DEFAULT = "2097152"; // 2 MB
public static final String PARQUET_COMPRESSION = "write.parquet.compression-codec";
public static final String DELETE_PARQUET_COMPRESSION = "write.delete.parquet.compression-codec";
public static final String PARQUET_COMPRESSION_DEFAULT = "gzip";
public static final String PARQUET_COMPRESSION_LEVEL = "write.parquet.compression-level";
public static final String DELETE_PARQUET_COMPRESSION_LEVEL = "write.delete.parquet.compression-level";
public static final String PARQUET_COMPRESSION_LEVEL_DEFAULT = null;
public static final String AVRO_COMPRESSION = "write.avro.compression-codec";
public static final String DELETE_AVRO_COMPRESSION = "write.delete.avro.compression-codec";
public static final String AVRO_COMPRESSION_DEFAULT = "gzip";
public static final String SPLIT_SIZE = "read.split.target-size";
public static final long SPLIT_SIZE_DEFAULT = 134217728; // 128 MB
public static final String METADATA_SPLIT_SIZE = "read.split.metadata-target-size";
public static final long METADATA_SPLIT_SIZE_DEFAULT = 32 * 1024 * 1024; // 32 MB
public static final String SPLIT_LOOKBACK = "read.split.planning-lookback";
public static final int SPLIT_LOOKBACK_DEFAULT = 10;
public static final String SPLIT_OPEN_FILE_COST = "read.split.open-file-cost";
public static final long SPLIT_OPEN_FILE_COST_DEFAULT = 4 * 1024 * 1024; // 4MB
public static final String PARQUET_VECTORIZATION_ENABLED = "read.parquet.vectorization.enabled";
public static final boolean PARQUET_VECTORIZATION_ENABLED_DEFAULT = false;
public static final String PARQUET_BATCH_SIZE = "read.parquet.vectorization.batch-size";
public static final int PARQUET_BATCH_SIZE_DEFAULT = 5000;
public static final String ORC_VECTORIZATION_ENABLED = "read.orc.vectorization.enabled";
public static final boolean ORC_VECTORIZATION_ENABLED_DEFAULT = false;
public static final String ORC_BATCH_SIZE = "read.orc.vectorization.batch-size";
public static final int ORC_BATCH_SIZE_DEFAULT = 5000;
public static final String OBJECT_STORE_ENABLED = "write.object-storage.enabled";
public static final boolean OBJECT_STORE_ENABLED_DEFAULT = false;
/**
* @deprecated Use {@link #WRITE_DATA_LOCATION} instead.
*/
@Deprecated
public static final String OBJECT_STORE_PATH = "write.object-storage.path";
public static final String WRITE_LOCATION_PROVIDER_IMPL = "write.location-provider.impl";
/**
* @deprecated Use {@link #WRITE_DATA_LOCATION} instead.
*/
@Deprecated
public static final String WRITE_FOLDER_STORAGE_LOCATION = "write.folder-storage.path";
/**
* @deprecated will be removed in 0.14.0, use {@link #WRITE_DATA_LOCATION} instead
*/
@Deprecated
public static final String WRITE_NEW_DATA_LOCATION = "write.folder-storage.path";
// This only applies to files written after this property is set. Files previously written aren't
// relocated to reflect this parameter.
// If not set, defaults to a "data" folder underneath the root path of the table.
public static final String WRITE_DATA_LOCATION = "write.data.path";
// This only applies to files written after this property is set. Files previously written aren't
// relocated to reflect this parameter.
// If not set, defaults to a "metadata" folder underneath the root path of the table.
public static final String WRITE_METADATA_LOCATION = "write.metadata.path";
public static final String WRITE_PARTITION_SUMMARY_LIMIT = "write.summary.partition-limit";
public static final int WRITE_PARTITION_SUMMARY_LIMIT_DEFAULT = 0;
public static final String MANIFEST_LISTS_ENABLED = "write.manifest-lists.enabled";
public static final boolean MANIFEST_LISTS_ENABLED_DEFAULT = true;
public static final String METADATA_COMPRESSION = "write.metadata.compression-codec";
public static final String METADATA_COMPRESSION_DEFAULT = "none";
public static final String METADATA_PREVIOUS_VERSIONS_MAX = "write.metadata.previous-versions-max";
public static final int METADATA_PREVIOUS_VERSIONS_MAX_DEFAULT = 100;
// This enables to delete the oldest metadata file after commit.
public static final String METADATA_DELETE_AFTER_COMMIT_ENABLED = "write.metadata.delete-after-commit.enabled";
public static final boolean METADATA_DELETE_AFTER_COMMIT_ENABLED_DEFAULT = false;
public static final String METRICS_MODE_COLUMN_CONF_PREFIX = "write.metadata.metrics.column.";
public static final String DEFAULT_WRITE_METRICS_MODE = "write.metadata.metrics.default";
public static final String DEFAULT_WRITE_METRICS_MODE_DEFAULT = "truncate(16)";
public static final String DEFAULT_NAME_MAPPING = "schema.name-mapping.default";
public static final String WRITE_AUDIT_PUBLISH_ENABLED = "write.wap.enabled";
public static final String WRITE_AUDIT_PUBLISH_ENABLED_DEFAULT = "false";
public static final String WRITE_TARGET_FILE_SIZE_BYTES = "write.target-file-size-bytes";
public static final String DELETE_TARGET_FILE_SIZE_BYTES = "write.delete.target-file-size-bytes";
public static final long WRITE_TARGET_FILE_SIZE_BYTES_DEFAULT = 536870912; // 512 MB
public static final String SPARK_WRITE_PARTITIONED_FANOUT_ENABLED = "write.spark.fanout.enabled";
public static final boolean SPARK_WRITE_PARTITIONED_FANOUT_ENABLED_DEFAULT = false;
public static final String SNAPSHOT_ID_INHERITANCE_ENABLED = "compatibility.snapshot-id-inheritance.enabled";
public static final boolean SNAPSHOT_ID_INHERITANCE_ENABLED_DEFAULT = false;
public static final String ENGINE_HIVE_ENABLED = "engine.hive.enabled";
public static final boolean ENGINE_HIVE_ENABLED_DEFAULT = false;
public static final String WRITE_DISTRIBUTION_MODE = "write.distribution-mode";
public static final String WRITE_DISTRIBUTION_MODE_NONE = "none";
public static final String WRITE_DISTRIBUTION_MODE_HASH = "hash";
public static final String WRITE_DISTRIBUTION_MODE_RANGE = "range";
public static final String WRITE_DISTRIBUTION_MODE_DEFAULT = WRITE_DISTRIBUTION_MODE_NONE;
public static final String GC_ENABLED = "gc.enabled";
public static final boolean GC_ENABLED_DEFAULT = true;
public static final String MAX_SNAPSHOT_AGE_MS = "history.expire.max-snapshot-age-ms";
public static final long MAX_SNAPSHOT_AGE_MS_DEFAULT = 5 * 24 * 60 * 60 * 1000; // 5 days
public static final String MIN_SNAPSHOTS_TO_KEEP = "history.expire.min-snapshots-to-keep";
public static final int MIN_SNAPSHOTS_TO_KEEP_DEFAULT = 1;
public static final String DELETE_ISOLATION_LEVEL = "write.delete.isolation-level";
public static final String DELETE_ISOLATION_LEVEL_DEFAULT = "serializable";
public static final String DELETE_MODE = "write.delete.mode";
public static final String DELETE_MODE_DEFAULT = "copy-on-write";
public static final String DELETE_DISTRIBUTION_MODE = "write.delete.distribution-mode";
public static final String UPDATE_ISOLATION_LEVEL = "write.update.isolation-level";
public static final String UPDATE_ISOLATION_LEVEL_DEFAULT = "serializable";
public static final String UPDATE_MODE = "write.update.mode";
public static final String UPDATE_MODE_DEFAULT = "copy-on-write";
public static final String UPDATE_DISTRIBUTION_MODE = "write.update.distribution-mode";
public static final String MERGE_ISOLATION_LEVEL = "write.merge.isolation-level";
public static final String MERGE_ISOLATION_LEVEL_DEFAULT = "serializable";
public static final String MERGE_MODE = "write.merge.mode";
public static final String MERGE_MODE_DEFAULT = "copy-on-write";
public static final String MERGE_CARDINALITY_CHECK_ENABLED = "write.merge.cardinality-check.enabled";
public static final boolean MERGE_CARDINALITY_CHECK_ENABLED_DEFAULT = true;
public static final String MERGE_DISTRIBUTION_MODE = "write.merge.distribution-mode";
public static final String UPSERT_ENABLED = "write.upsert.enabled";
public static final boolean UPSERT_ENABLED_DEFAULT = false;
}
| 1 | 37,876 | What's the perf impact of writing a bloom filter? Does it make sense to enable it by default if the perf impact is minor? It would be nice to include benchmarks. | apache-iceberg | java |
@@ -1,4 +1,4 @@
-//snippet-sourcedescription:[DetachRolePolicy.java demonstrates how to detach a policy from an AWS Identity and Access Management (IAM) role.]
+//snippet-sourcedescription:[DetachRolePolicy.java demonstrates how to detach a policy from an AWS Identity and Access Management (AWS IAM) role.]
//snippet-keyword:[AWS SDK for Java v2]
//snippet-keyword:[Code Sample]
//snippet-service:[AWS IAM] | 1 | //snippet-sourcedescription:[DetachRolePolicy.java demonstrates how to detach a policy from an AWS Identity and Access Management (IAM) role.]
//snippet-keyword:[AWS SDK for Java v2]
//snippet-keyword:[Code Sample]
//snippet-service:[AWS IAM]
//snippet-sourcetype:[full-example]
//snippet-sourcedate:[11/02/2020]
//snippet-sourceauthor:[scmacdon-aws]
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package com.example.iam;
// snippet-start:[iam.java2.detach_role_policy.import]
import software.amazon.awssdk.services.iam.model.DetachRolePolicyRequest;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.iam.IamClient;
import software.amazon.awssdk.services.iam.model.IamException;
// snippet-end:[iam.java2.detach_role_policy.import]
public class DetachRolePolicy {

    /**
     * Sample entry point. Expects exactly two command-line arguments — the role
     * name and the policy ARN — then detaches that managed policy from the role.
     *
     * @param args args[0] = role name, args[1] = policy ARN
     */
    public static void main(String[] args) {

        final String usage = "\n" +
                "Usage:\n" +
                " DetachRolePolicy <roleName> <policyArn> \n\n" +
                "Where:\n" +
                " roleName - a role name that you can obtain from the AWS Console. \n\n" +
                " policyArn - a policy ARN that you can obtain from the AWS Console. \n\n" ;

        // Guard clause: print usage and exit when the argument count is wrong.
        if (args.length != 2) {
            System.out.println(usage);
            System.exit(1);
        }

        String roleName = args[0];
        String policyArn = args[1];

        // IAM is a global service, so the client targets the global pseudo-region.
        IamClient iam = IamClient.builder()
                .region(Region.AWS_GLOBAL)
                .build();

        detachPolicy(iam, roleName, policyArn);
        System.out.println("Done");
        iam.close();
    }

    // snippet-start:[iam.java2.detach_role_policy.main]
    /**
     * Detaches the given managed policy from the given IAM role. On an IAM
     * service error the error message is printed to stderr and the JVM exits
     * with status 1.
     *
     * @param iam       an initialized IamClient
     * @param roleName  the role to detach the policy from
     * @param policyArn the ARN of the managed policy to detach
     */
    public static void detachPolicy(IamClient iam, String roleName, String policyArn) {
        try {
            DetachRolePolicyRequest detachRequest = DetachRolePolicyRequest.builder()
                    .roleName(roleName)
                    .policyArn(policyArn)
                    .build();

            iam.detachRolePolicy(detachRequest);
            System.out.println("Successfully detached policy " + policyArn +
                " from role " + roleName);

        } catch (IamException e) {
            System.err.println(e.awsErrorDetails().errorMessage());
            System.exit(1);
        }
    }
    // snippet-end:[iam.java2.detach_role_policy.main]
}
| 1 | 18,241 | AWS Identity and Access Management (IAM) | awsdocs-aws-doc-sdk-examples | rb |
@@ -162,13 +162,13 @@ const std::map<llvm::StringRef, hipCounter> CUDA_DRIVER_FUNCTION_MAP{
{"cuMemAlloc", {"hipMalloc", "", CONV_MEMORY, API_DRIVER}},
{"cuMemAlloc_v2", {"hipMalloc", "", CONV_MEMORY, API_DRIVER}},
// cudaHostAlloc
- {"cuMemAllocHost", {"hipMemAllocHost", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
+ {"cuMemAllocHost", {"hipMemAllocHost", "", CONV_MEMORY, API_DRIVER}},
{"cuMemAllocHost_v2", {"hipMemAllocHost", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// cudaMallocManaged
{"cuMemAllocManaged", {"hipMemAllocManaged", "", CONV_MEMORY, API_DRIVER}},
// no analogue
// NOTE: Not equal to cudaMallocPitch due to different signatures
- {"cuMemAllocPitch", {"hipMemAllocPitch", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
+ {"cuMemAllocPitch", {"hipMemAllocPitch", "", CONV_MEMORY, API_DRIVER}},
{"cuMemAllocPitch_v2", {"hipMemAllocPitch", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaMemcpy due to different signatures | 1 | /*
Copyright (c) 2015 - present Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include "CUDA2HIP.h"
// Map of all CUDA Driver API functions
const std::map<llvm::StringRef, hipCounter> CUDA_DRIVER_FUNCTION_MAP{
// 5.2. Error Handling
// no analogue
// NOTE: cudaGetErrorName and cuGetErrorName have different signatures
{"cuGetErrorName", {"hipGetErrorName_", "", CONV_ERROR, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: cudaGetErrorString and cuGetErrorString have different signatures
{"cuGetErrorString", {"hipGetErrorString_", "", CONV_ERROR, API_DRIVER, HIP_UNSUPPORTED}},
// 5.3. Initialization
// no analogue
{"cuInit", {"hipInit", "", CONV_INIT, API_DRIVER}},
// 5.4 Version Management
// cudaDriverGetVersion
{"cuDriverGetVersion", {"hipDriverGetVersion", "", CONV_VERSION, API_DRIVER}},
// 5.5. Device Management
// cudaGetDevice
// NOTE: cudaGetDevice has additional attr: int ordinal
{"cuDeviceGet", {"hipGetDevice", "", CONV_DEVICE, API_DRIVER}},
// cudaDeviceGetAttribute
{"cuDeviceGetAttribute", {"hipDeviceGetAttribute", "", CONV_DEVICE, API_DRIVER}},
// cudaGetDeviceCount
{"cuDeviceGetCount", {"hipGetDeviceCount", "", CONV_DEVICE, API_DRIVER}},
// no analogue
{"cuDeviceGetLuid", {"hipDeviceGetLuid", "", CONV_DEVICE, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuDeviceGetName", {"hipDeviceGetName", "", CONV_DEVICE, API_DRIVER}},
// no analogue
{"cuDeviceGetUuid", {"hipDeviceGetUuid", "", CONV_DEVICE, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuDeviceTotalMem", {"hipDeviceTotalMem", "", CONV_DEVICE, API_DRIVER}},
{"cuDeviceTotalMem_v2", {"hipDeviceTotalMem", "", CONV_DEVICE, API_DRIVER}},
// 5.6. Device Management [DEPRECATED]
{"cuDeviceComputeCapability", {"hipDeviceComputeCapability", "", CONV_DEVICE, API_DRIVER}},
// no analogue
// NOTE: Not equal to cudaGetDeviceProperties due to different attributes: cudaDeviceProp and CUdevprop
{"cuDeviceGetProperties", {"hipGetDeviceProperties_", "", CONV_DEVICE, API_DRIVER, HIP_UNSUPPORTED}},
// 5.7. Primary Context Management
// no analogues
{"cuDevicePrimaryCtxGetState", {"hipDevicePrimaryCtxGetState", "", CONV_CONTEXT, API_DRIVER}},
{"cuDevicePrimaryCtxRelease", {"hipDevicePrimaryCtxRelease", "", CONV_CONTEXT, API_DRIVER}},
{"cuDevicePrimaryCtxReset", {"hipDevicePrimaryCtxReset", "", CONV_CONTEXT, API_DRIVER}},
{"cuDevicePrimaryCtxRetain", {"hipDevicePrimaryCtxRetain", "", CONV_CONTEXT, API_DRIVER}},
{"cuDevicePrimaryCtxSetFlags", {"hipDevicePrimaryCtxSetFlags", "", CONV_CONTEXT, API_DRIVER}},
// 5.8. Context Management
// no analogues, except a few
{"cuCtxCreate", {"hipCtxCreate", "", CONV_CONTEXT, API_DRIVER}},
{"cuCtxCreate_v2", {"hipCtxCreate", "", CONV_CONTEXT, API_DRIVER}},
{"cuCtxDestroy", {"hipCtxDestroy", "", CONV_CONTEXT, API_DRIVER}},
{"cuCtxDestroy_v2", {"hipCtxDestroy", "", CONV_CONTEXT, API_DRIVER}},
{"cuCtxGetApiVersion", {"hipCtxGetApiVersion", "", CONV_CONTEXT, API_DRIVER}},
{"cuCtxGetCacheConfig", {"hipCtxGetCacheConfig", "", CONV_CONTEXT, API_DRIVER}},
{"cuCtxGetCurrent", {"hipCtxGetCurrent", "", CONV_CONTEXT, API_DRIVER}},
{"cuCtxGetDevice", {"hipCtxGetDevice", "", CONV_CONTEXT, API_DRIVER}},
// cudaGetDeviceFlags
// TODO: rename to hipGetDeviceFlags
{"cuCtxGetFlags", {"hipCtxGetFlags", "", CONV_CONTEXT, API_DRIVER}},
// cudaDeviceGetLimit
{"cuCtxGetLimit", {"hipDeviceGetLimit", "", CONV_CONTEXT, API_DRIVER}},
// cudaDeviceGetSharedMemConfig
// TODO: rename to hipDeviceGetSharedMemConfig
{"cuCtxGetSharedMemConfig", {"hipCtxGetSharedMemConfig", "", CONV_CONTEXT, API_DRIVER}},
// cudaDeviceGetStreamPriorityRange
{"cuCtxGetStreamPriorityRange", {"hipDeviceGetStreamPriorityRange", "", CONV_CONTEXT, API_DRIVER}},
{"cuCtxPopCurrent", {"hipCtxPopCurrent", "", CONV_CONTEXT, API_DRIVER}},
{"cuCtxPopCurrent_v2", {"hipCtxPopCurrent", "", CONV_CONTEXT, API_DRIVER}},
{"cuCtxPushCurrent", {"hipCtxPushCurrent", "", CONV_CONTEXT, API_DRIVER}},
{"cuCtxPushCurrent_v2", {"hipCtxPushCurrent", "", CONV_CONTEXT, API_DRIVER}},
{"cuCtxSetCacheConfig", {"hipCtxSetCacheConfig", "", CONV_CONTEXT, API_DRIVER}},
{"cuCtxSetCurrent", {"hipCtxSetCurrent", "", CONV_CONTEXT, API_DRIVER}},
// cudaDeviceSetLimit
{"cuCtxSetLimit", {"hipDeviceSetLimit", "", CONV_CONTEXT, API_DRIVER}},
// cudaDeviceSetSharedMemConfig
// TODO: rename to hipDeviceSetSharedMemConfig
{"cuCtxSetSharedMemConfig", {"hipCtxSetSharedMemConfig", "", CONV_CONTEXT, API_DRIVER}},
// cudaDeviceSynchronize
// TODO: rename to hipDeviceSynchronize
{"cuCtxSynchronize", {"hipCtxSynchronize", "", CONV_CONTEXT, API_DRIVER}},
// 5.9. Context Management [DEPRECATED]
// no analogues
{"cuCtxAttach", {"hipCtxAttach", "", CONV_CONTEXT, API_DRIVER, HIP_UNSUPPORTED}},
{"cuCtxDetach", {"hipCtxDetach", "", CONV_CONTEXT, API_DRIVER, HIP_UNSUPPORTED}},
// 5.10. Module Management
// no analogues
{"cuLinkAddData", {"hipLinkAddData", "", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuLinkAddData_v2", {"hipLinkAddData", "", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuLinkAddFile", {"hipLinkAddFile", "", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuLinkAddFile_v2", {"hipLinkAddFile", "", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuLinkComplete", {"hipLinkComplete", "", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuLinkCreate", {"hipLinkCreate", "", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuLinkCreate_v2", {"hipLinkCreate", "", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuLinkDestroy", {"hipLinkDestroy", "", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuModuleGetFunction", {"hipModuleGetFunction", "", CONV_MODULE, API_DRIVER}},
{"cuModuleGetGlobal", {"hipModuleGetGlobal", "", CONV_MODULE, API_DRIVER}},
{"cuModuleGetGlobal_v2", {"hipModuleGetGlobal", "", CONV_MODULE, API_DRIVER}},
{"cuModuleGetSurfRef", {"hipModuleGetSurfRef", "", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuModuleGetTexRef", {"hipModuleGetTexRef", "", CONV_MODULE, API_DRIVER}},
{"cuModuleLoad", {"hipModuleLoad", "", CONV_MODULE, API_DRIVER}},
{"cuModuleLoadData", {"hipModuleLoadData", "", CONV_MODULE, API_DRIVER}},
{"cuModuleLoadDataEx", {"hipModuleLoadDataEx", "", CONV_MODULE, API_DRIVER}},
{"cuModuleLoadFatBinary", {"hipModuleLoadFatBinary", "", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuModuleUnload", {"hipModuleUnload", "", CONV_MODULE, API_DRIVER}},
// 5.11. Memory Management
// no analogue
{"cuArray3DCreate", {"hipArray3DCreate", "", CONV_MEMORY, API_DRIVER}},
{"cuArray3DCreate_v2", {"hipArray3DCreate", "", CONV_MEMORY, API_DRIVER}},
{"cuArray3DGetDescriptor", {"hipArray3DGetDescriptor", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuArray3DGetDescriptor_v2", {"hipArray3DGetDescriptor", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuArrayCreate", {"hipArrayCreate", "", CONV_MEMORY, API_DRIVER}},
{"cuArrayCreate_v2", {"hipArrayCreate", "", CONV_MEMORY, API_DRIVER}},
{"cuArrayDestroy", {"hipArrayDestroy", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuArrayGetDescriptor", {"hipArrayGetDescriptor", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuArrayGetDescriptor_v2", {"hipArrayGetDescriptor", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// cudaDeviceGetByPCIBusId
{"cuDeviceGetByPCIBusId", {"hipDeviceGetByPCIBusId", "", CONV_MEMORY, API_DRIVER}},
// cudaDeviceGetPCIBusId
{"cuDeviceGetPCIBusId", {"hipDeviceGetPCIBusId", "", CONV_MEMORY, API_DRIVER}},
// cudaIpcCloseMemHandle
{"cuIpcCloseMemHandle", {"hipIpcCloseMemHandle", "", CONV_MEMORY, API_DRIVER}},
// cudaIpcGetEventHandle
{"cuIpcGetEventHandle", {"hipIpcGetEventHandle", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// cudaIpcGetMemHandle
{"cuIpcGetMemHandle", {"hipIpcGetMemHandle", "", CONV_MEMORY, API_DRIVER}},
// cudaIpcOpenEventHandle
{"cuIpcOpenEventHandle", {"hipIpcOpenEventHandle", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// cudaIpcOpenMemHandle
{"cuIpcOpenMemHandle", {"hipIpcOpenMemHandle", "", CONV_MEMORY, API_DRIVER}},
// cudaMalloc
{"cuMemAlloc", {"hipMalloc", "", CONV_MEMORY, API_DRIVER}},
{"cuMemAlloc_v2", {"hipMalloc", "", CONV_MEMORY, API_DRIVER}},
// cudaHostAlloc
{"cuMemAllocHost", {"hipMemAllocHost", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuMemAllocHost_v2", {"hipMemAllocHost", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// cudaMallocManaged
{"cuMemAllocManaged", {"hipMemAllocManaged", "", CONV_MEMORY, API_DRIVER}},
// no analogue
// NOTE: Not equal to cudaMallocPitch due to different signatures
{"cuMemAllocPitch", {"hipMemAllocPitch", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuMemAllocPitch_v2", {"hipMemAllocPitch", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaMemcpy due to different signatures
{"cuMemcpy", {"hipMemcpy_", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaMemcpy2D due to different signatures
{"cuMemcpy2D", {"hipMemcpyParam2D", "", CONV_MEMORY, API_DRIVER}},
{"cuMemcpy2D_v2", {"hipMemcpyParam2D", "", CONV_MEMORY, API_DRIVER}},
// no analogue
// NOTE: Not equal to cudaMemcpy2DAsync/hipMemcpy2DAsync due to different signatures
{"cuMemcpy2DAsync", {"hipMemcpyParam2DAsync", "", CONV_MEMORY, API_DRIVER}},
{"cuMemcpy2DAsync_v2", {"hipMemcpyParam2DAsync", "", CONV_MEMORY, API_DRIVER}},
// no analogue
{"cuMemcpy2DUnaligned", {"hipMemcpy2DUnaligned", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuMemcpy2DUnaligned_v2", {"hipMemcpy2DUnaligned", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaMemcpy3D due to different signatures
{"cuMemcpy3D", {"hipMemcpy3D_", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuMemcpy3D_v2", {"hipMemcpy3D_", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaMemcpy3DAsync due to different signatures
{"cuMemcpy3DAsync", {"hipMemcpy3DAsync_", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuMemcpy3DAsync_v2", {"hipMemcpy3DAsync_", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaMemcpy3DPeer due to different signatures
{"cuMemcpy3DPeer", {"hipMemcpy3DPeer_", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaMemcpy3DPeerAsync due to different signatures
{"cuMemcpy3DPeerAsync", {"hipMemcpy3DPeerAsync_", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaMemcpyAsync due to different signatures
{"cuMemcpyAsync", {"hipMemcpyAsync_", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaMemcpyArrayToArray due to different signatures
{"cuMemcpyAtoA", {"hipMemcpyAtoA", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuMemcpyAtoA_v2", {"hipMemcpyAtoA", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuMemcpyAtoD", {"hipMemcpyAtoD", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuMemcpyAtoD_v2", {"hipMemcpyAtoD", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuMemcpyAtoH", {"hipMemcpyAtoH", "", CONV_MEMORY, API_DRIVER}},
{"cuMemcpyAtoH_v2", {"hipMemcpyAtoH", "", CONV_MEMORY, API_DRIVER}},
// no analogue
{"cuMemcpyAtoHAsync", {"hipMemcpyAtoHAsync", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuMemcpyAtoHAsync_v2", {"hipMemcpyAtoHAsync", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuMemcpyDtoA", {"hipMemcpyDtoA", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuMemcpyDtoA_v2", {"hipMemcpyDtoA", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuMemcpyDtoD", {"hipMemcpyDtoD", "", CONV_MEMORY, API_DRIVER}},
{"cuMemcpyDtoD_v2", {"hipMemcpyDtoD", "", CONV_MEMORY, API_DRIVER}},
// no analogue
{"cuMemcpyDtoDAsync", {"hipMemcpyDtoDAsync", "", CONV_MEMORY, API_DRIVER}},
{"cuMemcpyDtoDAsync_v2", {"hipMemcpyDtoDAsync", "", CONV_MEMORY, API_DRIVER}},
// no analogue
{"cuMemcpyDtoH", {"hipMemcpyDtoH", "", CONV_MEMORY, API_DRIVER}},
{"cuMemcpyDtoH_v2", {"hipMemcpyDtoH", "", CONV_MEMORY, API_DRIVER}},
// no analogue
{"cuMemcpyDtoHAsync", {"hipMemcpyDtoHAsync", "", CONV_MEMORY, API_DRIVER}},
{"cuMemcpyDtoHAsync_v2", {"hipMemcpyDtoHAsync", "", CONV_MEMORY, API_DRIVER}},
// no analogue
{"cuMemcpyHtoA", {"hipMemcpyHtoA", "", CONV_MEMORY, API_DRIVER}},
{"cuMemcpyHtoA_v2", {"hipMemcpyHtoA", "", CONV_MEMORY, API_DRIVER}},
// no analogue
{"cuMemcpyHtoAAsync", {"hipMemcpyHtoAAsync", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuMemcpyHtoAAsync_v2", {"hipMemcpyHtoAAsync", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuMemcpyHtoD", {"hipMemcpyHtoD", "", CONV_MEMORY, API_DRIVER}},
{"cuMemcpyHtoD_v2", {"hipMemcpyHtoD", "", CONV_MEMORY, API_DRIVER}},
// no analogue
{"cuMemcpyHtoDAsync", {"hipMemcpyHtoDAsync", "", CONV_MEMORY, API_DRIVER}},
{"cuMemcpyHtoDAsync_v2", {"hipMemcpyHtoDAsync", "", CONV_MEMORY, API_DRIVER}},
// no analogue
// NOTE: Not equal to cudaMemcpyPeer due to different signatures
{"cuMemcpyPeer", {"hipMemcpyPeer_", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaMemcpyPeerAsync due to different signatures
{"cuMemcpyPeerAsync", {"hipMemcpyPeerAsync_", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// cudaFree
{"cuMemFree", {"hipFree", "", CONV_MEMORY, API_DRIVER}},
{"cuMemFree_v2", {"hipFree", "", CONV_MEMORY, API_DRIVER}},
// cudaFreeHost
{"cuMemFreeHost", {"hipHostFree", "", CONV_MEMORY, API_DRIVER}},
// no analogue
{"cuMemGetAddressRange", {"hipMemGetAddressRange", "", CONV_MEMORY, API_DRIVER}},
{"cuMemGetAddressRange_v2", {"hipMemGetAddressRange", "", CONV_MEMORY, API_DRIVER}},
// cudaMemGetInfo
{"cuMemGetInfo", {"hipMemGetInfo", "", CONV_MEMORY, API_DRIVER}},
{"cuMemGetInfo_v2", {"hipMemGetInfo", "", CONV_MEMORY, API_DRIVER}},
// cudaHostAlloc
{"cuMemHostAlloc", {"hipHostMalloc", "", CONV_MEMORY, API_DRIVER}},
// cudaHostGetDevicePointer
{"cuMemHostGetDevicePointer", {"hipHostGetDevicePointer", "", CONV_MEMORY, API_DRIVER}},
{"cuMemHostGetDevicePointer_v2", {"hipHostGetDevicePointer", "", CONV_MEMORY, API_DRIVER}},
// cudaHostGetFlags
{"cuMemHostGetFlags", {"hipMemHostGetFlags", "", CONV_MEMORY, API_DRIVER}},
// cudaHostRegister
{"cuMemHostRegister", {"hipHostRegister", "", CONV_MEMORY, API_DRIVER}},
{"cuMemHostRegister_v2", {"hipHostRegister", "", CONV_MEMORY, API_DRIVER}},
// cudaHostUnregister
{"cuMemHostUnregister", {"hipHostUnregister", "", CONV_MEMORY, API_DRIVER}},
// no analogue
{"cuMemsetD16", {"hipMemsetD16", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuMemsetD16_v2", {"hipMemsetD16", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuMemsetD16Async", {"hipMemsetD16Async", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuMemsetD2D16", {"hipMemsetD2D16", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuMemsetD2D16_v2", {"hipMemsetD2D16", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuMemsetD2D16Async", {"hipMemsetD2D16Async", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuMemsetD2D32", {"hipMemsetD2D32", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuMemsetD2D32_v2", {"hipMemsetD2D32", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuMemsetD2D32Async", {"hipMemsetD2D32Async", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuMemsetD2D8", {"hipMemsetD2D8", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuMemsetD2D8_v2", {"hipMemsetD2D8", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuMemsetD2D8Async", {"hipMemsetD2D8Async", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// cudaMemset
{"cuMemsetD32", {"hipMemsetD32", "", CONV_MEMORY, API_DRIVER}},
{"cuMemsetD32_v2", {"hipMemsetD32", "", CONV_MEMORY, API_DRIVER}},
// cudaMemsetAsync
{"cuMemsetD32Async", {"hipMemsetD32Async", "", CONV_MEMORY, API_DRIVER}},
// no analogue
{"cuMemsetD8", {"hipMemsetD8", "", CONV_MEMORY, API_DRIVER}},
{"cuMemsetD8_v2", {"hipMemsetD8", "", CONV_MEMORY, API_DRIVER}},
// no analogue
{"cuMemsetD8Async", {"hipMemsetD8Async", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaMallocMipmappedArray due to different signatures
{"cuMipmappedArrayCreate", {"hipMipmappedArrayCreate", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaFreeMipmappedArray due to different signatures
{"cuMipmappedArrayDestroy", {"hipMipmappedArrayDestroy", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaGetMipmappedArrayLevel due to different signatures
{"cuMipmappedArrayGetLevel", {"hipMipmappedArrayGetLevel", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// 5.12. Unified Addressing
// cudaMemAdvise
{"cuMemAdvise", {"hipMemAdvise", "", CONV_ADDRESSING, API_DRIVER, HIP_UNSUPPORTED}},
// TODO: double check cudaMemPrefetchAsync
{"cuMemPrefetchAsync", {"hipMemPrefetchAsync_", "", CONV_ADDRESSING, API_DRIVER, HIP_UNSUPPORTED}},
// cudaMemRangeGetAttribute
{"cuMemRangeGetAttribute", {"hipMemRangeGetAttribute", "", CONV_ADDRESSING, API_DRIVER, HIP_UNSUPPORTED}},
// cudaMemRangeGetAttributes
{"cuMemRangeGetAttributes", {"hipMemRangeGetAttributes", "", CONV_ADDRESSING, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuPointerGetAttribute", {"hipPointerGetAttribute", "", CONV_ADDRESSING, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaPointerGetAttributes due to different signatures
{"cuPointerGetAttributes", {"hipPointerGetAttributes", "", CONV_ADDRESSING, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuPointerSetAttribute", {"hipPointerSetAttribute", "", CONV_ADDRESSING, API_DRIVER, HIP_UNSUPPORTED}},
// 5.13. Stream Management
// cudaStreamAddCallback
{"cuStreamAddCallback", {"hipStreamAddCallback", "", CONV_STREAM, API_DRIVER}},
// cudaStreamAttachMemAsync
{"cuStreamAttachMemAsync", {"hipStreamAttachMemAsync", "", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED}},
// cudaStreamBeginCapture
{"cuStreamBeginCapture", {"hipStreamBeginCapture", "", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED}},
{"cuStreamBeginCapture_v2", {"hipStreamBeginCapture", "", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED}},
{"cuStreamBeginCapture_ptsz", {"hipStreamBeginCapture", "", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED}},
// cudaStreamCreateWithFlags
{"cuStreamCreate", {"hipStreamCreateWithFlags", "", CONV_STREAM, API_DRIVER}},
// cudaStreamCreateWithPriority
{"cuStreamCreateWithPriority", {"hipStreamCreateWithPriority", "", CONV_STREAM, API_DRIVER}},
// cudaStreamDestroy
{"cuStreamDestroy", {"hipStreamDestroy", "", CONV_STREAM, API_DRIVER}},
{"cuStreamDestroy_v2", {"hipStreamDestroy", "", CONV_STREAM, API_DRIVER}},
// cudaStreamEndCapture
{"cuStreamEndCapture", {"hipStreamEndCapture", "", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED}},
// cudaStreamGetCaptureInfo
{"cuStreamGetCaptureInfo", {"hipStreamGetCaptureInfo", "", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuStreamGetCtx", {"hipStreamGetContext", "", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED}},
// cudaStreamGetFlags
{"cuStreamGetFlags", {"hipStreamGetFlags", "", CONV_STREAM, API_DRIVER}},
// cudaStreamGetPriority
{"cuStreamGetPriority", {"hipStreamGetPriority", "", CONV_STREAM, API_DRIVER}},
// cudaStreamIsCapturing
{"cuStreamIsCapturing", {"hipStreamIsCapturing", "", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED}},
// cudaStreamQuery
{"cuStreamQuery", {"hipStreamQuery", "", CONV_STREAM, API_DRIVER}},
// cudaStreamSynchronize
{"cuStreamSynchronize", {"hipStreamSynchronize", "", CONV_STREAM, API_DRIVER}},
// cudaStreamWaitEvent
{"cuStreamWaitEvent", {"hipStreamWaitEvent", "", CONV_STREAM, API_DRIVER}},
// cudaThreadExchangeStreamCaptureMode
{"cuThreadExchangeStreamCaptureMode", {"hipThreadExchangeStreamCaptureMode", "", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED}},
// 5.14. Event Management
// cudaEventCreateWithFlags
{"cuEventCreate", {"hipEventCreateWithFlags", "", CONV_EVENT, API_DRIVER}},
// cudaEventDestroy
{"cuEventDestroy", {"hipEventDestroy", "", CONV_EVENT, API_DRIVER}},
{"cuEventDestroy_v2", {"hipEventDestroy", "", CONV_EVENT, API_DRIVER}},
// cudaEventElapsedTime
{"cuEventElapsedTime", {"hipEventElapsedTime", "", CONV_EVENT, API_DRIVER}},
// cudaEventQuery
{"cuEventQuery", {"hipEventQuery", "", CONV_EVENT, API_DRIVER}},
// cudaEventRecord
{"cuEventRecord", {"hipEventRecord", "", CONV_EVENT, API_DRIVER}},
// cudaEventSynchronize
{"cuEventSynchronize", {"hipEventSynchronize", "", CONV_EVENT, API_DRIVER}},
// 5.15. External Resource Interoperability
// cudaDestroyExternalMemory
{"cuDestroyExternalMemory", {"hipDestroyExternalMemory", "", CONV_EXT_RES, API_DRIVER, HIP_UNSUPPORTED}},
// cudaDestroyExternalSemaphore
{"cuDestroyExternalSemaphore", {"hipDestroyExternalSemaphore", "", CONV_EXT_RES, API_DRIVER, HIP_UNSUPPORTED}},
// cudaExternalMemoryGetMappedBuffer
{"cuExternalMemoryGetMappedBuffer", {"hipExternalMemoryGetMappedBuffer", "", CONV_EXT_RES, API_DRIVER, HIP_UNSUPPORTED}},
// cudaExternalMemoryGetMappedMipmappedArray
{"cuExternalMemoryGetMappedMipmappedArray", {"hipExternalMemoryGetMappedMipmappedArray", "", CONV_EXT_RES, API_DRIVER, HIP_UNSUPPORTED}},
// cudaImportExternalMemory
{"cuImportExternalMemory", {"hipImportExternalMemory", "", CONV_EXT_RES, API_DRIVER, HIP_UNSUPPORTED}},
// cudaImportExternalSemaphore
{"cuImportExternalSemaphore", {"hipImportExternalSemaphore", "", CONV_EXT_RES, API_DRIVER, HIP_UNSUPPORTED}},
// cudaSignalExternalSemaphoresAsync
{"cuSignalExternalSemaphoresAsync", {"hipSignalExternalSemaphoresAsync", "", CONV_EXT_RES, API_DRIVER, HIP_UNSUPPORTED}},
// cudaWaitExternalSemaphoresAsync
{"cuWaitExternalSemaphoresAsync", {"hipWaitExternalSemaphoresAsync", "", CONV_EXT_RES, API_DRIVER, HIP_UNSUPPORTED}},
// 5.16. Stream Memory Operations
// no analogues
{"cuStreamBatchMemOp", {"hipStreamBatchMemOp", "", CONV_STREAM_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuStreamWaitValue32", {"hipStreamWaitValue32", "", CONV_STREAM_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuStreamWaitValue64", {"hipStreamWaitValue64", "", CONV_STREAM_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuStreamWriteValue32", {"hipStreamWriteValue32", "", CONV_STREAM_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuStreamWriteValue64", {"hipStreamWriteValue64", "", CONV_STREAM_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// 5.17.Execution Control
// no analogue
{"cuFuncGetAttribute", {"hipFuncGetAttribute", "", CONV_EXECUTION, API_DRIVER}},
// no analogue
// NOTE: Not equal to cudaFuncSetAttribute due to different signatures
{"cuFuncSetAttribute", {"hipFuncSetAttribute", "", CONV_EXECUTION, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaFuncSetCacheConfig due to different signatures
{"cuFuncSetCacheConfig", {"hipFuncSetCacheConfig", "", CONV_EXECUTION, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaFuncSetSharedMemConfig due to different signatures
{"cuFuncSetSharedMemConfig", {"hipFuncSetSharedMemConfig", "", CONV_EXECUTION, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaLaunchCooperativeKernel due to different signatures
{"cuLaunchCooperativeKernel", {"hipLaunchCooperativeKernel_", "", CONV_EXECUTION, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaLaunchCooperativeKernelMultiDevice due to different signatures
{"cuLaunchCooperativeKernelMultiDevice", {"hipLaunchCooperativeKernelMultiDevice_", "", CONV_EXECUTION, API_DRIVER, HIP_UNSUPPORTED}},
// cudaLaunchHostFunc
{"cuLaunchHostFunc", {"hipLaunchHostFunc", "", CONV_EXECUTION, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaLaunchKernel due to different signatures
{"cuLaunchKernel", {"hipModuleLaunchKernel", "", CONV_EXECUTION, API_DRIVER}},
// 5.18.Execution Control [DEPRECATED]
// no analogue
{"cuFuncSetBlockShape", {"hipFuncSetBlockShape", "", CONV_EXECUTION, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuFuncSetSharedSize", {"hipFuncSetSharedSize", "", CONV_EXECUTION, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaLaunch due to different signatures
{"cuLaunch", {"hipLaunch", "", CONV_EXECUTION, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuLaunchGrid", {"hipLaunchGrid", "", CONV_EXECUTION, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuLaunchGridAsync", {"hipLaunchGridAsync", "", CONV_EXECUTION, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuParamSetf", {"hipParamSetf", "", CONV_EXECUTION, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuParamSeti", {"hipParamSeti", "", CONV_EXECUTION, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuParamSetSize", {"hipParamSetSize", "", CONV_EXECUTION, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuParamSetTexRef", {"hipParamSetTexRef", "", CONV_EXECUTION, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuParamSetv", {"hipParamSetv", "", CONV_EXECUTION, API_DRIVER, HIP_UNSUPPORTED}},
// 5.19. Graph Management
// cudaGraphAddChildGraphNode
{"cuGraphAddChildGraphNode", {"hipGraphAddChildGraphNode", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphAddDependencies
{"cuGraphAddDependencies", {"hipGraphAddDependencies", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphAddEmptyNode
{"cuGraphAddEmptyNode", {"hipGraphAddEmptyNode", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphAddHostNode
{"cuGraphAddHostNode", {"hipGraphAddHostNode", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphAddKernelNode
{"cuGraphAddKernelNode", {"hipGraphAddKernelNode", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphAddMemcpyNode
{"cuGraphAddMemcpyNode", {"hipGraphAddMemcpyNode", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphAddMemsetNode
{"cuGraphAddMemsetNode", {"hipGraphAddMemsetNode", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphChildGraphNodeGetGraph
{"cuGraphChildGraphNodeGetGraph", {"hipGraphChildGraphNodeGetGraph", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphClone
{"cuGraphClone", {"hipGraphClone", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphCreate
{"cuGraphCreate", {"hipGraphCreate", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphDestroy
{"cuGraphDestroy", {"hipGraphDestroy", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphDestroyNode
{"cuGraphDestroyNode", {"hipGraphDestroyNode", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphExecDestroy
{"cuGraphExecDestroy", {"hipGraphExecDestroy", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphGetEdges
{"cuGraphGetEdges", {"hipGraphGetEdges", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphGetNodes
{"cuGraphGetNodes", {"hipGraphGetNodes", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphGetRootNodes
{"cuGraphGetRootNodes", {"hipGraphGetRootNodes", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphHostNodeGetParams
{"cuGraphHostNodeGetParams", {"hipGraphHostNodeGetParams", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphHostNodeSetParams
{"cuGraphHostNodeSetParams", {"hipGraphHostNodeSetParams", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphInstantiate
{"cuGraphInstantiate", {"hipGraphInstantiate", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphExecKernelNodeSetParams
{"cuGraphExecKernelNodeSetParams", {"hipGraphExecKernelNodeSetParams", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphKernelNodeGetParams
{"cuGraphKernelNodeGetParams", {"hipGraphKernelNodeGetParams", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphKernelNodeSetParams
{"cuGraphKernelNodeSetParams", {"hipGraphKernelNodeSetParams", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphLaunch
{"cuGraphLaunch", {"hipGraphLaunch", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphMemcpyNodeGetParams
{"cuGraphMemcpyNodeGetParams", {"hipGraphMemcpyNodeGetParams", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphMemcpyNodeSetParams
{"cuGraphMemcpyNodeSetParams", {"hipGraphMemcpyNodeSetParams", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphMemsetNodeGetParams
{"cuGraphMemsetNodeGetParams", {"hipGraphMemsetNodeGetParams", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphMemsetNodeSetParams
{"cuGraphMemsetNodeSetParams", {"hipGraphMemsetNodeSetParams", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphNodeFindInClone
{"cuGraphNodeFindInClone", {"hipGraphNodeFindInClone", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphNodeGetDependencies
{"cuGraphNodeGetDependencies", {"hipGraphNodeGetDependencies", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphNodeGetDependentNodes
{"cuGraphNodeGetDependentNodes", {"hipGraphNodeGetDependentNodes", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphNodeGetType
{"cuGraphNodeGetType", {"hipGraphNodeGetType", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphRemoveDependencies
{"cuGraphRemoveDependencies", {"hipGraphRemoveDependencies", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// 5.20. Occupancy
// cudaOccupancyMaxActiveBlocksPerMultiprocessor
{"cuOccupancyMaxActiveBlocksPerMultiprocessor", {"hipOccupancyMaxActiveBlocksPerMultiprocessor", "", CONV_OCCUPANCY, API_DRIVER}},
// cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags
{"cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", {"hipOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", "", CONV_OCCUPANCY, API_DRIVER}},
// cudaOccupancyMaxPotentialBlockSize
{"cuOccupancyMaxPotentialBlockSize", {"hipOccupancyMaxPotentialBlockSize", "", CONV_OCCUPANCY, API_DRIVER}},
// cudaOccupancyMaxPotentialBlockSizeWithFlags
{"cuOccupancyMaxPotentialBlockSizeWithFlags", {"hipOccupancyMaxPotentialBlockSizeWithFlags", "", CONV_OCCUPANCY, API_DRIVER, HIP_UNSUPPORTED}},
// 5.21. Texture Reference Management
// no analogues
{"cuTexRefGetAddress", {"hipTexRefGetAddress", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefGetAddress_v2", {"hipTexRefGetAddress", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefGetAddressMode", {"hipTexRefGetAddressMode", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefGetArray", {"hipTexRefGetArray", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefGetBorderColor", {"hipTexRefGetBorderColor", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefGetFilterMode", {"hipTexRefGetFilterMode", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefGetFlags", {"hipTexRefGetFlags", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefGetFormat", {"hipTexRefGetFormat", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefGetMaxAnisotropy", {"hipTexRefGetMaxAnisotropy", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefGetMipmapFilterMode", {"hipTexRefGetMipmapFilterMode", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefGetMipmapLevelBias", {"hipTexRefGetMipmapLevelBias", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefGetMipmapLevelClamp", {"hipTexRefGetMipmapLevelClamp", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefGetMipmappedArray", {"hipTexRefGetMipmappedArray", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefSetAddress", {"hipTexRefSetAddress", "", CONV_TEXTURE, API_DRIVER}},
{"cuTexRefSetAddress_v2", {"hipTexRefSetAddress", "", CONV_TEXTURE, API_DRIVER}},
{"cuTexRefSetAddress2D", {"hipTexRefSetAddress2D", "", CONV_TEXTURE, API_DRIVER}},
{"cuTexRefSetAddress2D_v2", {"hipTexRefSetAddress2D", "", CONV_TEXTURE, API_DRIVER}},
{"cuTexRefSetAddress2D_v3", {"hipTexRefSetAddress2D", "", CONV_TEXTURE, API_DRIVER}},
{"cuTexRefSetAddressMode", {"hipTexRefSetAddressMode", "", CONV_TEXTURE, API_DRIVER}},
{"cuTexRefSetArray", {"hipTexRefSetArray", "", CONV_TEXTURE, API_DRIVER}},
{"cuTexRefSetBorderColor", {"hipTexRefSetBorderColor", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefSetFilterMode", {"hipTexRefSetFilterMode", "", CONV_TEXTURE, API_DRIVER}},
{"cuTexRefSetFlags", {"hipTexRefSetFlags", "", CONV_TEXTURE, API_DRIVER}},
{"cuTexRefSetFormat", {"hipTexRefSetFormat", "", CONV_TEXTURE, API_DRIVER}},
{"cuTexRefSetMaxAnisotropy", {"hipTexRefSetMaxAnisotropy", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefSetMipmapFilterMode", {"hipTexRefSetMipmapFilterMode", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefSetMipmapLevelBias", {"hipTexRefSetMipmapLevelBias", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefSetMipmapLevelClamp", {"hipTexRefSetMipmapLevelClamp", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefSetMipmappedArray", {"hipTexRefSetMipmappedArray", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
// 5.22. Texture Reference Management [DEPRECATED]
// no analogues
{"cuTexRefCreate", {"hipTexRefCreate", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefDestroy", {"hipTexRefDestroy", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
// 5.23. Surface Reference Management
// no analogues
{"cuSurfRefGetArray", {"hipSurfRefGetArray", "", CONV_SURFACE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuSurfRefSetArray", {"hipSurfRefSetArray", "", CONV_SURFACE, API_DRIVER, HIP_UNSUPPORTED}},
// 5.24. Texture Object Management
// no analogue
// NOTE: Not equal to cudaCreateTextureObject due to different signatures
{"cuTexObjectCreate", {"hipTexObjectCreate", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
// cudaDestroyTextureObject
{"cuTexObjectDestroy", {"hipTexObjectDestroy", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaGetTextureObjectResourceDesc due to different signatures
{"cuTexObjectGetResourceDesc", {"hipTexObjectGetResourceDesc", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGetTextureObjectResourceViewDesc
{"cuTexObjectGetResourceViewDesc", {"hipTexObjectGetResourceViewDesc", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaGetTextureObjectTextureDesc due to different signatures
{"cuTexObjectGetTextureDesc", {"hipTexObjectGetTextureDesc", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
// 5.25. Surface Object Management
// no analogue
// NOTE: Not equal to cudaCreateSurfaceObject due to different signatures
{"cuSurfObjectCreate", {"hipSurfObjectCreate", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
// cudaDestroySurfaceObject
{"cuSurfObjectDestroy", {"hipSurfObjectDestroy", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaGetSurfaceObjectResourceDesc due to different signatures
{"cuSurfObjectGetResourceDesc", {"hipSurfObjectGetResourceDesc", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
// 5.26. Peer Context Memory Access
// no analogue
// NOTE: Not equal to cudaDeviceEnablePeerAccess due to different signatures
{"cuCtxEnablePeerAccess", {"hipCtxEnablePeerAccess", "", CONV_PEER, API_DRIVER}},
// no analogue
// NOTE: Not equal to cudaDeviceDisablePeerAccess due to different signatures
{"cuCtxDisablePeerAccess", {"hipCtxDisablePeerAccess", "", CONV_PEER, API_DRIVER}},
// cudaDeviceCanAccessPeer
{"cuDeviceCanAccessPeer", {"hipDeviceCanAccessPeer", "", CONV_PEER, API_DRIVER}},
// cudaDeviceGetP2PAttribute
{"cuDeviceGetP2PAttribute", {"hipDeviceGetP2PAttribute", "", CONV_PEER, API_DRIVER, HIP_UNSUPPORTED}},
// 5.27. Graphics Interoperability
// cudaGraphicsMapResources
{"cuGraphicsMapResources", {"hipGraphicsMapResources", "", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphicsResourceGetMappedMipmappedArray
{"cuGraphicsResourceGetMappedMipmappedArray", {"hipGraphicsResourceGetMappedMipmappedArray", "", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphicsResourceGetMappedPointer
{"cuGraphicsResourceGetMappedPointer", {"hipGraphicsResourceGetMappedPointer", "", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphicsResourceGetMappedPointer
{"cuGraphicsResourceGetMappedPointer_v2", {"hipGraphicsResourceGetMappedPointer", "", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphicsResourceSetMapFlags
{"cuGraphicsResourceSetMapFlags", {"hipGraphicsResourceSetMapFlags", "", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphicsResourceSetMapFlags
{"cuGraphicsResourceSetMapFlags_v2", {"hipGraphicsResourceSetMapFlags", "", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphicsSubResourceGetMappedArray
{"cuGraphicsSubResourceGetMappedArray", {"hipGraphicsSubResourceGetMappedArray", "", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphicsUnmapResources
{"cuGraphicsUnmapResources", {"hipGraphicsUnmapResources", "", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphicsUnregisterResource
{"cuGraphicsUnregisterResource", {"hipGraphicsUnregisterResource", "", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED}},
// 5.28. Profiler Control
// cudaProfilerInitialize
{"cuProfilerInitialize", {"hipProfilerInitialize", "", CONV_PROFILER, API_DRIVER, HIP_UNSUPPORTED}},
// cudaProfilerStart
{"cuProfilerStart", {"hipProfilerStart", "", CONV_PROFILER, API_DRIVER}},
// cudaProfilerStop
{"cuProfilerStop", {"hipProfilerStop", "", CONV_PROFILER, API_DRIVER}},
// 5.29. OpenGL Interoperability
// cudaGLGetDevices
{"cuGLGetDevices", {"hipGLGetDevices", "", CONV_OPENGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphicsGLRegisterBuffer
{"cuGraphicsGLRegisterBuffer", {"hipGraphicsGLRegisterBuffer", "", CONV_OPENGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphicsGLRegisterImage
{"cuGraphicsGLRegisterImage", {"hipGraphicsGLRegisterImage", "", CONV_OPENGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaWGLGetDevice
{"cuWGLGetDevice", {"hipWGLGetDevice", "", CONV_OPENGL, API_DRIVER, HIP_UNSUPPORTED}},
// 5.29. OpenGL Interoperability [DEPRECATED]
// no analogue
{"cuGLCtxCreate", {"hipGLCtxCreate", "", CONV_OPENGL, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuGLInit", {"hipGLInit", "", CONV_OPENGL, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaGLMapBufferObject due to different signatures
{"cuGLMapBufferObject", {"hipGLMapBufferObject_", "", CONV_OPENGL, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaGLMapBufferObjectAsync due to different signatures
{"cuGLMapBufferObjectAsync", {"hipGLMapBufferObjectAsync_", "", CONV_OPENGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGLRegisterBufferObject
{"cuGLRegisterBufferObject", {"hipGLRegisterBufferObject", "", CONV_OPENGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGLSetBufferObjectMapFlags
{"cuGLSetBufferObjectMapFlags", {"hipGLSetBufferObjectMapFlags", "", CONV_OPENGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGLUnmapBufferObject
{"cuGLUnmapBufferObject", {"hipGLUnmapBufferObject", "", CONV_OPENGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGLUnmapBufferObjectAsync
{"cuGLUnmapBufferObjectAsync", {"hipGLUnmapBufferObjectAsync", "", CONV_OPENGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGLUnregisterBufferObject
{"cuGLUnregisterBufferObject", {"hipGLUnregisterBufferObject", "", CONV_OPENGL, API_DRIVER, HIP_UNSUPPORTED}},
// 5.30.Direct3D 9 Interoperability
// no analogue
{"cuD3D9CtxCreate", {"hipD3D9CtxCreate", "", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuD3D9CtxCreateOnDevice", {"hipD3D9CtxCreateOnDevice", "", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D9GetDevice
{"cuD3D9GetDevice", {"hipD3D9GetDevice", "", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D9GetDevices
{"cuD3D9GetDevices", {"hipD3D9GetDevices", "", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D9GetDirect3DDevice
{"cuD3D9GetDirect3DDevice", {"hipD3D9GetDirect3DDevice", "", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphicsD3D9RegisterResource
{"cuGraphicsD3D9RegisterResource", {"hipGraphicsD3D9RegisterResource", "", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED}},
// 5.30.Direct3D 9 Interoperability [DEPRECATED]
// cudaD3D9MapResources
{"cuD3D9MapResources", {"hipD3D9MapResources", "", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D9RegisterResource
{"cuD3D9RegisterResource", {"hipD3D9RegisterResource", "", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D9ResourceGetMappedArray
{"cuD3D9ResourceGetMappedArray", {"hipD3D9ResourceGetMappedArray", "", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D9ResourceGetMappedPitch
{"cuD3D9ResourceGetMappedPitch", {"hipD3D9ResourceGetMappedPitch", "", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D9ResourceGetMappedPointer
{"cuD3D9ResourceGetMappedPointer", {"hipD3D9ResourceGetMappedPointer", "", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D9ResourceGetMappedSize
{"cuD3D9ResourceGetMappedSize", {"hipD3D9ResourceGetMappedSize", "", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D9ResourceGetSurfaceDimensions
{"cuD3D9ResourceGetSurfaceDimensions", {"hipD3D9ResourceGetSurfaceDimensions", "", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D9ResourceSetMapFlags
{"cuD3D9ResourceSetMapFlags", {"hipD3D9ResourceSetMapFlags", "", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D9UnmapResources
{"cuD3D9UnmapResources", {"hipD3D9UnmapResources", "", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D9UnregisterResource
{"cuD3D9UnregisterResource", {"hipD3D9UnregisterResource", "", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED}},
// 5.31. Direct3D 10 Interoperability
// cudaD3D10GetDevice
{"cuD3D10GetDevice", {"hipD3D10GetDevice", "", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D10GetDevices
{"cuD3D10GetDevices", {"hipD3D10GetDevices", "", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphicsD3D10RegisterResource
{"cuGraphicsD3D10RegisterResource", {"hipGraphicsD3D10RegisterResource", "", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED}},
// 5.31. Direct3D 10 Interoperability [DEPRECATED]
// no analogue
{"cuD3D10CtxCreate", {"hipD3D10CtxCreate", "", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuD3D10CtxCreateOnDevice", {"hipD3D10CtxCreateOnDevice", "", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D10GetDirect3DDevice
{"cuD3D10GetDirect3DDevice", {"hipD3D10GetDirect3DDevice", "", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D10MapResources
{"cuD3D10MapResources", {"hipD3D10MapResources", "", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D10RegisterResource
{"cuD3D10RegisterResource", {"hipD3D10RegisterResource", "", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D10ResourceGetMappedArray
{"cuD3D10ResourceGetMappedArray", {"hipD3D10ResourceGetMappedArray", "", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D10ResourceGetMappedPitch
{"cuD3D10ResourceGetMappedPitch", {"hipD3D10ResourceGetMappedPitch", "", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D10ResourceGetMappedPointer
{"cuD3D10ResourceGetMappedPointer", {"hipD3D10ResourceGetMappedPointer", "", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D10ResourceGetMappedSize
{"cuD3D10ResourceGetMappedSize", {"hipD3D10ResourceGetMappedSize", "", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D10ResourceGetSurfaceDimensions
{"cuD3D10ResourceGetSurfaceDimensions", {"hipD3D10ResourceGetSurfaceDimensions", "", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D10ResourceSetMapFlags
{"cuD310ResourceSetMapFlags", {"hipD3D10ResourceSetMapFlags", "", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D10UnmapResources
{"cuD3D10UnmapResources", {"hipD3D10UnmapResources", "", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D10UnregisterResource
{"cuD3D10UnregisterResource", {"hipD3D10UnregisterResource", "", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED}},
// 5.32. Direct3D 11 Interoperability
// cudaD3D11GetDevice
{"cuD3D11GetDevice", {"hipD3D11GetDevice", "", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D11GetDevices
{"cuD3D11GetDevices", {"hipD3D11GetDevices", "", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphicsD3D11RegisterResource
{"cuGraphicsD3D11RegisterResource", {"hipGraphicsD3D11RegisterResource", "", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED}},
// 5.32. Direct3D 11 Interoperability [DEPRECATED]
// no analogue
{"cuD3D11CtxCreate", {"hipD3D11CtxCreate", "", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuD3D11CtxCreateOnDevice", {"hipD3D11CtxCreateOnDevice", "", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D11GetDirect3DDevice
{"cuD3D11GetDirect3DDevice", {"hipD3D11GetDirect3DDevice", "", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED}},
// 5.33. VDPAU Interoperability
// cudaGraphicsVDPAURegisterOutputSurface
{"cuGraphicsVDPAURegisterOutputSurface", {"hipGraphicsVDPAURegisterOutputSurface", "", CONV_VDPAU, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphicsVDPAURegisterVideoSurface
{"cuGraphicsVDPAURegisterVideoSurface", {"hipGraphicsVDPAURegisterVideoSurface", "", CONV_VDPAU, API_DRIVER, HIP_UNSUPPORTED}},
// cudaVDPAUGetDevice
{"cuVDPAUGetDevice", {"hipVDPAUGetDevice", "", CONV_VDPAU, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuVDPAUCtxCreate", {"hipVDPAUCtxCreate", "", CONV_VDPAU, API_DRIVER, HIP_UNSUPPORTED}},
// 5.34. EGL Interoperability
// cudaEGLStreamConsumerAcquireFrame
{"cuEGLStreamConsumerAcquireFrame", {"hipEGLStreamConsumerAcquireFrame", "", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaEGLStreamConsumerConnect
{"cuEGLStreamConsumerConnect", {"hipEGLStreamConsumerConnect", "", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaEGLStreamConsumerConnectWithFlags
{"cuEGLStreamConsumerConnectWithFlags", {"hipEGLStreamConsumerConnectWithFlags", "", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaEGLStreamConsumerDisconnect
{"cuEGLStreamConsumerDisconnect", {"hipEGLStreamConsumerDisconnect", "", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaEGLStreamConsumerReleaseFrame
{"cuEGLStreamConsumerReleaseFrame", {"hipEGLStreamConsumerReleaseFrame", "", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaEGLStreamProducerConnect
{"cuEGLStreamProducerConnect", {"hipEGLStreamProducerConnect", "", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaEGLStreamProducerDisconnect
{"cuEGLStreamProducerDisconnect", {"hipEGLStreamProducerDisconnect", "", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaEGLStreamProducerPresentFrame
{"cuEGLStreamProducerPresentFrame", {"hipEGLStreamProducerPresentFrame", "", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaEGLStreamProducerReturnFrame
{"cuEGLStreamProducerReturnFrame", {"hipEGLStreamProducerReturnFrame", "", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphicsEGLRegisterImage
{"cuGraphicsEGLRegisterImage", {"hipGraphicsEGLRegisterImage", "", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphicsResourceGetMappedEglFrame
{"cuGraphicsResourceGetMappedEglFrame", {"hipGraphicsResourceGetMappedEglFrame", "", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaEventCreateFromEGLSync
{"cuEventCreateFromEGLSync", {"hipEventCreateFromEGLSync", "", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED}},
};
| 1 | 8,085 | Remove `HIP_UNSUPPORTED` - it is an alias | ROCm-Developer-Tools-HIP | cpp |
@@ -388,4 +388,10 @@ public class MockExecutorLoader implements ExecutorLoader {
public void unassignExecutor(int executionId) throws ExecutorManagerException {
executionExecutorMapping.remove(executionId);
}
+
+ @Override
+ public List<ExecutableFlow> fetchRecentlyFinishedFlows(long lifeTimeMs)
+ throws ExecutorManagerException {
+ return null;
+ }
} | 1 | /*
* Copyright 2014 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.executor;
import java.io.File;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import azkaban.executor.ExecutorLogEvent.EventType;
import azkaban.utils.FileIOUtils.LogData;
import azkaban.utils.Pair;
import azkaban.utils.Props;
/**
 * In-memory {@link ExecutorLoader} used by tests. Keeps flows, nodes,
 * references, executors and executor log events in plain maps/lists and
 * counts updates so tests can assert on them.
 */
public class MockExecutorLoader implements ExecutorLoader {

  // execution id -> id of the executor the execution was assigned to
  HashMap<Integer, Integer> executionExecutorMapping =
      new HashMap<Integer, Integer>();
  // execution id -> uploaded flow
  HashMap<Integer, ExecutableFlow> flows =
      new HashMap<Integer, ExecutableFlow>();
  // node id -> uploaded node
  HashMap<String, ExecutableNode> nodes = new HashMap<String, ExecutableNode>();
  // execution id -> active execution reference
  HashMap<Integer, ExecutionReference> refs =
      new HashMap<Integer, ExecutionReference>();
  // number of flow uploads/updates seen (test bookkeeping)
  int flowUpdateCount = 0;
  // node id -> number of uploads/updates for that node
  HashMap<String, Integer> jobUpdateCount = new HashMap<String, Integer>();
  Map<Integer, Pair<ExecutionReference, ExecutableFlow>> activeFlows =
      new HashMap<Integer, Pair<ExecutionReference, ExecutableFlow>>();
  List<Executor> executors = new ArrayList<Executor>();
  // Monotonically increasing id source; never decremented so ids stay unique.
  int executorIdCounter = 0;
  // executor id -> posted log events, in insertion order
  Map<Integer, ArrayList<ExecutorLogEvent>> executorEvents =
      new HashMap<Integer, ArrayList<ExecutorLogEvent>>();

  @Override
  public void uploadExecutableFlow(ExecutableFlow flow)
      throws ExecutorManagerException {
    flows.put(flow.getExecutionId(), flow);
    flowUpdateCount++;
  }

  @Override
  public ExecutableFlow fetchExecutableFlow(int execId)
      throws ExecutorManagerException {
    // Return a deep copy so callers cannot mutate the stored flow.
    ExecutableFlow flow = flows.get(execId);
    return ExecutableFlow.createExecutableFlowFromObject(flow.toObject());
  }

  @Override
  public Map<Integer, Pair<ExecutionReference, ExecutableFlow>> fetchActiveFlows()
      throws ExecutorManagerException {
    return activeFlows;
  }

  @Override
  public Pair<ExecutionReference, ExecutableFlow> fetchActiveFlowByExecId(int execId)
      throws ExecutorManagerException {
    return activeFlows.get(execId);
  }

  @Override
  public List<ExecutableFlow> fetchFlowHistory(int projectId, String flowId,
      int skip, int num) throws ExecutorManagerException {
    // Not needed by current tests.
    return null;
  }

  @Override
  public void addActiveExecutableReference(ExecutionReference ref)
      throws ExecutorManagerException {
    refs.put(ref.getExecId(), ref);
  }

  @Override
  public void removeActiveExecutableReference(int execId)
      throws ExecutorManagerException {
    refs.remove(execId);
  }

  /** Test helper: true if an active reference exists for the execution. */
  public boolean hasActiveExecutableReference(int execId) {
    return refs.containsKey(execId);
  }

  @Override
  public void uploadLogFile(int execId, String name, int attempt, File... files)
      throws ExecutorManagerException {
    // No-op: log files are not persisted by the mock.
  }

  @Override
  public void updateExecutableFlow(ExecutableFlow flow)
      throws ExecutorManagerException {
    ExecutableFlow toUpdate = flows.get(flow.getExecutionId());
    toUpdate.applyUpdateObject((Map<String, Object>) flow.toUpdateObject(0));
    flowUpdateCount++;
  }

  @Override
  public void uploadExecutableNode(ExecutableNode node, Props inputParams)
      throws ExecutorManagerException {
    // Store a copy of the node and start its update counter at 1.
    ExecutableNode exNode = new ExecutableNode();
    exNode.fillExecutableFromMapObject(node.toObject());

    nodes.put(node.getId(), exNode);
    jobUpdateCount.put(node.getId(), 1);
  }

  @Override
  public void updateExecutableNode(ExecutableNode node)
      throws ExecutorManagerException {
    ExecutableNode foundNode = nodes.get(node.getId());
    foundNode.setEndTime(node.getEndTime());
    foundNode.setStartTime(node.getStartTime());
    foundNode.setStatus(node.getStatus());
    foundNode.setUpdateTime(node.getUpdateTime());

    Integer value = jobUpdateCount.get(node.getId());
    if (value == null) {
      // Updating a node that was never uploaded is a test error.
      throw new ExecutorManagerException("The node has not been uploaded");
    } else {
      jobUpdateCount.put(node.getId(), ++value);
    }

    flowUpdateCount++;
  }

  @Override
  public int fetchNumExecutableFlows(int projectId, String flowId)
      throws ExecutorManagerException {
    return 0;
  }

  @Override
  public int fetchNumExecutableFlows() throws ExecutorManagerException {
    return 0;
  }

  /** Test helper: total number of flow uploads/updates observed. */
  public int getFlowUpdateCount() {
    return flowUpdateCount;
  }

  /** Test helper: update count for a node, or null if never uploaded. */
  public Integer getNodeUpdateCount(String jobId) {
    return jobUpdateCount.get(jobId);
  }

  @Override
  public ExecutableJobInfo fetchJobInfo(int execId, String jobId, int attempt)
      throws ExecutorManagerException {
    return null;
  }

  @Override
  public boolean updateExecutableReference(int execId, long updateTime)
      throws ExecutorManagerException {
    return true;
  }

  @Override
  public LogData fetchLogs(int execId, String name, int attempt, int startByte,
      int endByte) throws ExecutorManagerException {
    return null;
  }

  @Override
  public List<ExecutableFlow> fetchFlowHistory(int skip, int num)
      throws ExecutorManagerException {
    return null;
  }

  @Override
  public List<ExecutableFlow> fetchFlowHistory(String projectContains,
      String flowContains, String userNameContains, int status, long startData,
      long endData, int skip, int num) throws ExecutorManagerException {
    return null;
  }

  @Override
  public List<ExecutableJobInfo> fetchJobHistory(int projectId, String jobId,
      int skip, int size) throws ExecutorManagerException {
    return null;
  }

  @Override
  public int fetchNumExecutableNodes(int projectId, String jobId)
      throws ExecutorManagerException {
    return 0;
  }

  @Override
  public Props fetchExecutionJobInputProps(int execId, String jobId)
      throws ExecutorManagerException {
    return null;
  }

  @Override
  public Props fetchExecutionJobOutputProps(int execId, String jobId)
      throws ExecutorManagerException {
    return null;
  }

  @Override
  public Pair<Props, Props> fetchExecutionJobProps(int execId, String jobId)
      throws ExecutorManagerException {
    return null;
  }

  @Override
  public List<ExecutableJobInfo> fetchJobInfoAttempts(int execId, String jobId)
      throws ExecutorManagerException {
    return null;
  }

  @Override
  public int removeExecutionLogsByTime(long millis)
      throws ExecutorManagerException {
    return 0;
  }

  @Override
  public List<ExecutableFlow> fetchFlowHistory(int projectId, String flowId,
      int skip, int num, Status status) throws ExecutorManagerException {
    return null;
  }

  @Override
  public List<Object> fetchAttachments(int execId, String name, int attempt)
      throws ExecutorManagerException {
    return null;
  }

  @Override
  public void uploadAttachmentFile(ExecutableNode node, File file)
      throws ExecutorManagerException {
    // No-op: attachments are not persisted by the mock.
  }

  @Override
  public List<Executor> fetchActiveExecutors() throws ExecutorManagerException {
    List<Executor> activeExecutors = new ArrayList<Executor>();
    for (Executor executor : executors) {
      if (executor.isActive()) {
        activeExecutors.add(executor);
      }
    }
    return activeExecutors;
  }

  @Override
  public Executor fetchExecutor(String host, int port)
      throws ExecutorManagerException {
    for (Executor executor : executors) {
      if (executor.getHost().equals(host) && executor.getPort() == port) {
        return executor;
      }
    }
    return null;
  }

  @Override
  public Executor fetchExecutor(int executorId) throws ExecutorManagerException {
    for (Executor executor : executors) {
      if (executor.getId() == executorId) {
        return executor;
      }
    }
    return null;
  }

  @Override
  public Executor addExecutor(String host, int port)
      throws ExecutorManagerException {
    // Only add if no executor with this host:port already exists;
    // returns null otherwise, mirroring the duplicate case.
    Executor executor = null;
    if (fetchExecutor(host, port) == null) {
      executorIdCounter++;
      executor = new Executor(executorIdCounter, host, port, true);
      executors.add(executor);
    }
    return executor;
  }

  @Override
  public void removeExecutor(String host, int port) throws ExecutorManagerException {
    Executor executor = fetchExecutor(host, port);
    if (executor != null) {
      // Fixed: do NOT decrement executorIdCounter here. Reusing ids would
      // let a future addExecutor collide with an id already handed out.
      executors.remove(executor);
    }
  }

  @Override
  public void postExecutorEvent(Executor executor, EventType type, String user,
      String message) throws ExecutorManagerException {
    ExecutorLogEvent event =
        new ExecutorLogEvent(executor.getId(), user, new Date(), type, message);

    if (!executorEvents.containsKey(executor.getId())) {
      executorEvents.put(executor.getId(), new ArrayList<ExecutorLogEvent>());
    }
    executorEvents.get(executor.getId()).add(event);
  }

  @Override
  public List<ExecutorLogEvent> getExecutorEvents(Executor executor, int num,
      int skip) throws ExecutorManagerException {
    // Fixed: the original containsKey check was inverted (it only looked up
    // events when the map did NOT contain the key, which NPE'd), and its
    // subList bounds dropped the last event. Also return an empty list
    // instead of null so callers can iterate safely.
    ArrayList<ExecutorLogEvent> events = executorEvents.get(executor.getId());
    if (events == null) {
      return new ArrayList<ExecutorLogEvent>();
    }
    int from = Math.min(Math.max(skip, 0), events.size());
    int to = Math.min(from + num, events.size());
    return new ArrayList<ExecutorLogEvent>(events.subList(from, to));
  }

  @Override
  public void updateExecutor(Executor executor) throws ExecutorManagerException {
    // Replace the stored executor that has the same id.
    Executor oldExecutor = fetchExecutor(executor.getId());
    executors.remove(oldExecutor);
    executors.add(executor);
  }

  @Override
  public List<Executor> fetchAllExecutors() throws ExecutorManagerException {
    return executors;
  }

  @Override
  public void assignExecutor(int executorId, int execId)
      throws ExecutorManagerException {
    ExecutionReference ref = refs.get(execId);
    ref.setExecutor(fetchExecutor(executorId));
    executionExecutorMapping.put(execId, executorId);
  }

  @Override
  public Executor fetchExecutorByExecutionId(int execId) throws ExecutorManagerException {
    if (executionExecutorMapping.containsKey(execId)) {
      return fetchExecutor(executionExecutorMapping.get(execId));
    } else {
      throw new ExecutorManagerException(
          "Failed to find executor with execution : " + execId);
    }
  }

  @Override
  public List<Pair<ExecutionReference, ExecutableFlow>> fetchQueuedFlows()
      throws ExecutorManagerException {
    // Queued = has an active reference but no executor assigned yet.
    List<Pair<ExecutionReference, ExecutableFlow>> queuedFlows =
        new ArrayList<Pair<ExecutionReference, ExecutableFlow>>();
    for (int execId : refs.keySet()) {
      if (!executionExecutorMapping.containsKey(execId)) {
        queuedFlows.add(new Pair<ExecutionReference, ExecutableFlow>(refs
            .get(execId), flows.get(execId)));
      }
    }
    return queuedFlows;
  }

  @Override
  public void unassignExecutor(int executionId) throws ExecutorManagerException {
    executionExecutorMapping.remove(executionId);
  }
}
| 1 | 13,201 | i think we should return an empty list instead of null here. | azkaban-azkaban | java |
@@ -104,10 +104,14 @@ public class UserPreferences {
public static final String PREF_ENABLE_AUTODL_WIFI_FILTER = "prefEnableAutoDownloadWifiFilter";
private static final String PREF_AUTODL_SELECTED_NETWORKS = "prefAutodownloadSelectedNetworks";
private static final String PREF_PROXY_TYPE = "prefProxyType";
- private static final String PREF_PROXY_HOST = "prefProxyHost";
- private static final String PREF_PROXY_PORT = "prefProxyPort";
- private static final String PREF_PROXY_USER = "prefProxyUser";
- private static final String PREF_PROXY_PASSWORD = "prefProxyPassword";
+ private static final String PREF_HTTP_PROXY_HOST = "prefHttpProxyHost";
+ private static final String PREF_HTTP_PROXY_PORT = "prefHttpProxyPort";
+ private static final String PREF_HTTP_PROXY_USER = "prefHttpProxyUser";
+ private static final String PREF_HTTP_PROXY_PASSWORD = "prefHttpProxyPassword";
+ private static final String PREF_SOCKS_PROXY_HOST = "prefSocksProxyHost";
+ private static final String PREF_SOCKS_PROXY_PORT = "prefSocksProxyPort";
+ private static final String PREF_SOCKS_PROXY_USER = "prefSocksProxyUser";
+ private static final String PREF_SOCKS_PROXY_PASSWORD = "prefSocksProxyPassword";
// Services
private static final String PREF_GPODNET_NOTIFICATIONS = "pref_gpodnet_notifications"; | 1 | package de.danoeh.antennapod.core.preferences;
import android.content.Context;
import android.content.SharedPreferences;
import android.content.res.Configuration;
import android.os.Build;
import android.text.TextUtils;
import android.util.Log;
import android.view.KeyEvent;
import androidx.annotation.NonNull;
import androidx.annotation.VisibleForTesting;
import androidx.core.app.NotificationCompat;
import androidx.preference.PreferenceManager;
import org.json.JSONArray;
import org.json.JSONException;
import java.io.File;
import java.io.IOException;
import java.net.Proxy;
import java.text.DecimalFormat;
import java.text.DecimalFormatSymbols;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import de.danoeh.antennapod.core.R;
import de.danoeh.antennapod.model.playback.MediaType;
import de.danoeh.antennapod.core.feed.SubscriptionsFilter;
import de.danoeh.antennapod.core.service.download.ProxyConfig;
import de.danoeh.antennapod.core.storage.APCleanupAlgorithm;
import de.danoeh.antennapod.core.storage.ExceptFavoriteCleanupAlgorithm;
import de.danoeh.antennapod.core.storage.APNullCleanupAlgorithm;
import de.danoeh.antennapod.core.storage.APQueueCleanupAlgorithm;
import de.danoeh.antennapod.core.storage.EpisodeCleanupAlgorithm;
import de.danoeh.antennapod.model.feed.SortOrder;
import de.danoeh.antennapod.core.util.download.AutoUpdateManager;
/**
* Provides access to preferences set by the user in the settings screen. A
* private instance of this class must first be instantiated via
* init() or otherwise every public method will throw an Exception
* when called.
*/
public class UserPreferences {
private UserPreferences(){}
private static final String TAG = "UserPreferences";
// User Interface
public static final String PREF_THEME = "prefTheme";
public static final String PREF_HIDDEN_DRAWER_ITEMS = "prefHiddenDrawerItems";
public static final String PREF_DRAWER_FEED_ORDER = "prefDrawerFeedOrder";
private static final String PREF_DRAWER_FEED_COUNTER = "prefDrawerFeedIndicator";
public static final String PREF_EXPANDED_NOTIFICATION = "prefExpandNotify";
public static final String PREF_USE_EPISODE_COVER = "prefEpisodeCover";
public static final String PREF_SHOW_TIME_LEFT = "showTimeLeft";
private static final String PREF_PERSISTENT_NOTIFICATION = "prefPersistNotify";
public static final String PREF_COMPACT_NOTIFICATION_BUTTONS = "prefCompactNotificationButtons";
public static final String PREF_LOCKSCREEN_BACKGROUND = "prefLockscreenBackground";
private static final String PREF_SHOW_DOWNLOAD_REPORT = "prefShowDownloadReport";
private static final String PREF_SHOW_AUTO_DOWNLOAD_REPORT = "prefShowAutoDownloadReport";
public static final String PREF_BACK_BUTTON_BEHAVIOR = "prefBackButtonBehavior";
private static final String PREF_BACK_BUTTON_GO_TO_PAGE = "prefBackButtonGoToPage";
public static final String PREF_FILTER_FEED = "prefSubscriptionsFilter";
public static final String PREF_QUEUE_KEEP_SORTED = "prefQueueKeepSorted";
public static final String PREF_QUEUE_KEEP_SORTED_ORDER = "prefQueueKeepSortedOrder";
// Playback
public static final String PREF_PAUSE_ON_HEADSET_DISCONNECT = "prefPauseOnHeadsetDisconnect";
public static final String PREF_UNPAUSE_ON_HEADSET_RECONNECT = "prefUnpauseOnHeadsetReconnect";
private static final String PREF_UNPAUSE_ON_BLUETOOTH_RECONNECT = "prefUnpauseOnBluetoothReconnect";
public static final String PREF_HARDWARE_FORWARD_BUTTON = "prefHardwareForwardButton";
public static final String PREF_HARDWARE_PREVIOUS_BUTTON = "prefHardwarePreviousButton";
public static final String PREF_FOLLOW_QUEUE = "prefFollowQueue";
public static final String PREF_SKIP_KEEPS_EPISODE = "prefSkipKeepsEpisode";
private static final String PREF_FAVORITE_KEEPS_EPISODE = "prefFavoriteKeepsEpisode";
private static final String PREF_AUTO_DELETE = "prefAutoDelete";
public static final String PREF_SMART_MARK_AS_PLAYED_SECS = "prefSmartMarkAsPlayedSecs";
private static final String PREF_PLAYBACK_SPEED_ARRAY = "prefPlaybackSpeedArray";
private static final String PREF_PAUSE_PLAYBACK_FOR_FOCUS_LOSS = "prefPauseForFocusLoss";
private static final String PREF_RESUME_AFTER_CALL = "prefResumeAfterCall";
public static final String PREF_VIDEO_BEHAVIOR = "prefVideoBehavior";
private static final String PREF_TIME_RESPECTS_SPEED = "prefPlaybackTimeRespectsSpeed";
public static final String PREF_STREAM_OVER_DOWNLOAD = "prefStreamOverDownload";
// Network
private static final String PREF_ENQUEUE_DOWNLOADED = "prefEnqueueDownloaded";
public static final String PREF_ENQUEUE_LOCATION = "prefEnqueueLocation";
public static final String PREF_UPDATE_INTERVAL = "prefAutoUpdateIntervall";
private static final String PREF_MOBILE_UPDATE = "prefMobileUpdateTypes";
public static final String PREF_EPISODE_CLEANUP = "prefEpisodeCleanup";
public static final String PREF_PARALLEL_DOWNLOADS = "prefParallelDownloads";
public static final String PREF_EPISODE_CACHE_SIZE = "prefEpisodeCacheSize";
public static final String PREF_ENABLE_AUTODL = "prefEnableAutoDl";
public static final String PREF_ENABLE_AUTODL_ON_BATTERY = "prefEnableAutoDownloadOnBattery";
public static final String PREF_ENABLE_AUTODL_WIFI_FILTER = "prefEnableAutoDownloadWifiFilter";
private static final String PREF_AUTODL_SELECTED_NETWORKS = "prefAutodownloadSelectedNetworks";
private static final String PREF_PROXY_TYPE = "prefProxyType";
private static final String PREF_PROXY_HOST = "prefProxyHost";
private static final String PREF_PROXY_PORT = "prefProxyPort";
private static final String PREF_PROXY_USER = "prefProxyUser";
private static final String PREF_PROXY_PASSWORD = "prefProxyPassword";
// Services
private static final String PREF_GPODNET_NOTIFICATIONS = "pref_gpodnet_notifications";
// Other
private static final String PREF_DATA_FOLDER = "prefDataFolder";
public static final String PREF_DELETE_REMOVES_FROM_QUEUE = "prefDeleteRemovesFromQueue";
public static final String PREF_USAGE_COUNTING_DATE = "prefUsageCounting";
// Mediaplayer
public static final String PREF_MEDIA_PLAYER = "prefMediaPlayer";
public static final String PREF_MEDIA_PLAYER_EXOPLAYER = "exoplayer";
private static final String PREF_PLAYBACK_SPEED = "prefPlaybackSpeed";
private static final String PREF_VIDEO_PLAYBACK_SPEED = "prefVideoPlaybackSpeed";
public static final String PREF_PLAYBACK_SKIP_SILENCE = "prefSkipSilence";
private static final String PREF_FAST_FORWARD_SECS = "prefFastForwardSecs";
private static final String PREF_REWIND_SECS = "prefRewindSecs";
private static final String PREF_QUEUE_LOCKED = "prefQueueLocked";
private static final String PREF_LEFT_VOLUME = "prefLeftVolume";
private static final String PREF_RIGHT_VOLUME = "prefRightVolume";
// Experimental
private static final String PREF_STEREO_TO_MONO = "PrefStereoToMono";
public static final String PREF_CAST_ENABLED = "prefCast"; //Used for enabling Chromecast support
public static final int EPISODE_CLEANUP_QUEUE = -1;
public static final int EPISODE_CLEANUP_NULL = -2;
public static final int EPISODE_CLEANUP_EXCEPT_FAVORITE = -3;
public static final int EPISODE_CLEANUP_DEFAULT = 0;
// Constants
private static final int NOTIFICATION_BUTTON_REWIND = 0;
private static final int NOTIFICATION_BUTTON_FAST_FORWARD = 1;
private static final int NOTIFICATION_BUTTON_SKIP = 2;
private static final int EPISODE_CACHE_SIZE_UNLIMITED = -1;
public static final int FEED_ORDER_COUNTER = 0;
public static final int FEED_ORDER_ALPHABETICAL = 1;
public static final int FEED_ORDER_MOST_PLAYED = 3;
public static final int FEED_COUNTER_SHOW_NEW_UNPLAYED_SUM = 0;
public static final int FEED_COUNTER_SHOW_NEW = 1;
public static final int FEED_COUNTER_SHOW_UNPLAYED = 2;
public static final int FEED_COUNTER_SHOW_NONE = 3;
public static final int FEED_COUNTER_SHOW_DOWNLOADED = 4;
private static Context context;
private static SharedPreferences prefs;
/**
* Sets up the UserPreferences class.
*
* @throws IllegalArgumentException if context is null
*/
    public static void init(@NonNull Context context) {
        Log.d(TAG, "Creating new instance of UserPreferences");
        // Hold the application context, not the caller's (possibly Activity)
        // context, to avoid leaking it through this static field.
        UserPreferences.context = context.getApplicationContext();
        UserPreferences.prefs = PreferenceManager.getDefaultSharedPreferences(context);
        // Presumably writes a .nomedia marker into the media folder so media
        // scanners skip it — TODO confirm against the helper's implementation.
        createNoMediaFile();
    }
/**
* Returns theme as R.style value
*
* @return R.style.Theme_AntennaPod_Light or R.style.Theme_AntennaPod_Dark
*/
public static int getTheme() {
return readThemeValue(prefs.getString(PREF_THEME, "system"));
}
public static int getNoTitleTheme() {
int theme = getTheme();
if (theme == R.style.Theme_AntennaPod_Dark) {
return R.style.Theme_AntennaPod_Dark_NoTitle;
} else if (theme == R.style.Theme_AntennaPod_TrueBlack) {
return R.style.Theme_AntennaPod_TrueBlack_NoTitle;
} else {
return R.style.Theme_AntennaPod_Light_NoTitle;
}
}
public static int getTranslucentTheme() {
int theme = getTheme();
if (theme == R.style.Theme_AntennaPod_Dark) {
return R.style.Theme_AntennaPod_Dark_Translucent;
} else if (theme == R.style.Theme_AntennaPod_TrueBlack) {
return R.style.Theme_AntennaPod_TrueBlack_Translucent;
} else {
return R.style.Theme_AntennaPod_Light_Translucent;
}
}
public static List<String> getHiddenDrawerItems() {
String hiddenItems = prefs.getString(PREF_HIDDEN_DRAWER_ITEMS, "");
return new ArrayList<>(Arrays.asList(TextUtils.split(hiddenItems, ",")));
}
public static List<Integer> getCompactNotificationButtons() {
String[] buttons = TextUtils.split(
prefs.getString(PREF_COMPACT_NOTIFICATION_BUTTONS,
NOTIFICATION_BUTTON_REWIND + "," + NOTIFICATION_BUTTON_FAST_FORWARD),
",");
List<Integer> notificationButtons = new ArrayList<>();
for (String button : buttons) {
notificationButtons.add(Integer.parseInt(button));
}
return notificationButtons;
}
/**
* Helper function to return whether the specified button should be shown on compact
* notifications.
*
* @param buttonId Either NOTIFICATION_BUTTON_REWIND, NOTIFICATION_BUTTON_FAST_FORWARD or
* NOTIFICATION_BUTTON_SKIP.
* @return {@code true} if button should be shown, {@code false} otherwise
*/
private static boolean showButtonOnCompactNotification(int buttonId) {
return getCompactNotificationButtons().contains(buttonId);
}
public static boolean showRewindOnCompactNotification() {
return showButtonOnCompactNotification(NOTIFICATION_BUTTON_REWIND);
}
public static boolean showFastForwardOnCompactNotification() {
return showButtonOnCompactNotification(NOTIFICATION_BUTTON_FAST_FORWARD);
}
public static boolean showSkipOnCompactNotification() {
return showButtonOnCompactNotification(NOTIFICATION_BUTTON_SKIP);
}
public static int getFeedOrder() {
String value = prefs.getString(PREF_DRAWER_FEED_ORDER, "" + FEED_ORDER_COUNTER);
return Integer.parseInt(value);
}
public static void setFeedOrder(String selected) {
prefs.edit()
.putString(PREF_DRAWER_FEED_ORDER, selected)
.apply();
}
public static int getFeedCounterSetting() {
String value = prefs.getString(PREF_DRAWER_FEED_COUNTER, "" + FEED_COUNTER_SHOW_NEW);
return Integer.parseInt(value);
}
/**
* @return {@code true} if episodes should use their own cover, {@code false} otherwise
*/
public static boolean getUseEpisodeCoverSetting() {
return prefs.getBoolean(PREF_USE_EPISODE_COVER, true);
}
/**
* @return {@code true} if we should show remaining time or the duration
*/
public static boolean shouldShowRemainingTime() {
return prefs.getBoolean(PREF_SHOW_TIME_LEFT, false);
}
/**
* Sets the preference for whether we show the remain time, if not show the duration. This will
* send out events so the current playing screen, queue and the episode list would refresh
*
* @return {@code true} if we should show remaining time or the duration
*/
public static void setShowRemainTimeSetting(Boolean showRemain) {
prefs.edit().putBoolean(PREF_SHOW_TIME_LEFT, showRemain).apply();
}
/**
* Returns notification priority.
*
* @return NotificationCompat.PRIORITY_MAX or NotificationCompat.PRIORITY_DEFAULT
*/
public static int getNotifyPriority() {
if (prefs.getBoolean(PREF_EXPANDED_NOTIFICATION, false)) {
return NotificationCompat.PRIORITY_MAX;
} else {
return NotificationCompat.PRIORITY_DEFAULT;
}
}
/**
* Returns true if notifications are persistent
*
* @return {@code true} if notifications are persistent, {@code false} otherwise
*/
public static boolean isPersistNotify() {
return prefs.getBoolean(PREF_PERSISTENT_NOTIFICATION, true);
}
/**
* Returns true if the lockscreen background should be set to the current episode's image
*
* @return {@code true} if the lockscreen background should be set, {@code false} otherwise
*/
public static boolean setLockscreenBackground() {
return prefs.getBoolean(PREF_LOCKSCREEN_BACKGROUND, true);
}
/**
* Returns true if download reports are shown
*
* @return {@code true} if download reports are shown, {@code false} otherwise
*/
public static boolean showDownloadReport() {
if (Build.VERSION.SDK_INT >= 26) {
return true; // System handles notification preferences
}
return prefs.getBoolean(PREF_SHOW_DOWNLOAD_REPORT, true);
}
/**
* Used for migration of the preference to system notification channels.
*/
public static boolean getShowDownloadReportRaw() {
return prefs.getBoolean(PREF_SHOW_DOWNLOAD_REPORT, true);
}
public static boolean showAutoDownloadReport() {
if (Build.VERSION.SDK_INT >= 26) {
return true; // System handles notification preferences
}
return prefs.getBoolean(PREF_SHOW_AUTO_DOWNLOAD_REPORT, false);
}
/**
* Used for migration of the preference to system notification channels.
*/
public static boolean getShowAutoDownloadReportRaw() {
return prefs.getBoolean(PREF_SHOW_AUTO_DOWNLOAD_REPORT, false);
}
public static boolean enqueueDownloadedEpisodes() {
return prefs.getBoolean(PREF_ENQUEUE_DOWNLOADED, true);
}
@VisibleForTesting
public static void setEnqueueDownloadedEpisodes(boolean enqueueDownloadedEpisodes) {
prefs.edit()
.putBoolean(PREF_ENQUEUE_DOWNLOADED, enqueueDownloadedEpisodes)
.apply();
}
public enum EnqueueLocation {
BACK, FRONT, AFTER_CURRENTLY_PLAYING
}
@NonNull
public static EnqueueLocation getEnqueueLocation() {
String valStr = prefs.getString(PREF_ENQUEUE_LOCATION, EnqueueLocation.BACK.name());
try {
return EnqueueLocation.valueOf(valStr);
} catch (Throwable t) {
// should never happen but just in case
Log.e(TAG, "getEnqueueLocation: invalid value '" + valStr + "' Use default.", t);
return EnqueueLocation.BACK;
}
}
public static void setEnqueueLocation(@NonNull EnqueueLocation location) {
prefs.edit()
.putString(PREF_ENQUEUE_LOCATION, location.name())
.apply();
}
public static boolean isPauseOnHeadsetDisconnect() {
return prefs.getBoolean(PREF_PAUSE_ON_HEADSET_DISCONNECT, true);
}
public static boolean isUnpauseOnHeadsetReconnect() {
return prefs.getBoolean(PREF_UNPAUSE_ON_HEADSET_RECONNECT, true);
}
public static boolean isUnpauseOnBluetoothReconnect() {
return prefs.getBoolean(PREF_UNPAUSE_ON_BLUETOOTH_RECONNECT, false);
}
public static int getHardwareForwardButton() {
return Integer.parseInt(prefs.getString(PREF_HARDWARE_FORWARD_BUTTON,
String.valueOf(KeyEvent.KEYCODE_MEDIA_FAST_FORWARD)));
}
public static int getHardwarePreviousButton() {
return Integer.parseInt(prefs.getString(PREF_HARDWARE_PREVIOUS_BUTTON,
String.valueOf(KeyEvent.KEYCODE_MEDIA_REWIND)));
}
public static boolean isFollowQueue() {
return prefs.getBoolean(PREF_FOLLOW_QUEUE, true);
}
/**
* Set to true to enable Continuous Playback
*/
@VisibleForTesting
public static void setFollowQueue(boolean value) {
prefs.edit().putBoolean(UserPreferences.PREF_FOLLOW_QUEUE, value).apply();
}
public static boolean shouldSkipKeepEpisode() { return prefs.getBoolean(PREF_SKIP_KEEPS_EPISODE, true); }
public static boolean shouldFavoriteKeepEpisode() {
return prefs.getBoolean(PREF_FAVORITE_KEEPS_EPISODE, true);
}
public static boolean isAutoDelete() {
return prefs.getBoolean(PREF_AUTO_DELETE, false);
}
public static int getSmartMarkAsPlayedSecs() {
return Integer.parseInt(prefs.getString(PREF_SMART_MARK_AS_PLAYED_SECS, "30"));
}
public static boolean shouldDeleteRemoveFromQueue() {
return prefs.getBoolean(PREF_DELETE_REMOVES_FROM_QUEUE, false);
}
public static float getPlaybackSpeed(MediaType mediaType) {
if (mediaType == MediaType.VIDEO) {
return getVideoPlaybackSpeed();
} else {
return getAudioPlaybackSpeed();
}
}
private static float getAudioPlaybackSpeed() {
try {
return Float.parseFloat(prefs.getString(PREF_PLAYBACK_SPEED, "1.00"));
} catch (NumberFormatException e) {
Log.e(TAG, Log.getStackTraceString(e));
UserPreferences.setPlaybackSpeed(1.0f);
return 1.0f;
}
}
private static float getVideoPlaybackSpeed() {
try {
return Float.parseFloat(prefs.getString(PREF_VIDEO_PLAYBACK_SPEED, "1.00"));
} catch (NumberFormatException e) {
Log.e(TAG, Log.getStackTraceString(e));
UserPreferences.setVideoPlaybackSpeed(1.0f);
return 1.0f;
}
}
public static boolean isSkipSilence() {
return prefs.getBoolean(PREF_PLAYBACK_SKIP_SILENCE, false);
}
public static List<Float> getPlaybackSpeedArray() {
return readPlaybackSpeedArray(prefs.getString(PREF_PLAYBACK_SPEED_ARRAY, null));
}
public static boolean shouldPauseForFocusLoss() {
return prefs.getBoolean(PREF_PAUSE_PLAYBACK_FOR_FOCUS_LOSS, false);
}
/*
* Returns update interval in milliseconds; value 0 means that auto update is disabled
* or feeds are updated at a certain time of day
*/
public static long getUpdateInterval() {
String updateInterval = prefs.getString(PREF_UPDATE_INTERVAL, "0");
if(!updateInterval.contains(":")) {
return readUpdateInterval(updateInterval);
} else {
return 0;
}
}
public static int[] getUpdateTimeOfDay() {
String datetime = prefs.getString(PREF_UPDATE_INTERVAL, "");
if(datetime.length() >= 3 && datetime.contains(":")) {
String[] parts = datetime.split(":");
int hourOfDay = Integer.parseInt(parts[0]);
int minute = Integer.parseInt(parts[1]);
return new int[] { hourOfDay, minute };
} else {
return new int[0];
}
}
public static boolean isAutoUpdateDisabled() {
return prefs.getString(PREF_UPDATE_INTERVAL, "").equals("0");
}
private static boolean isAllowMobileFor(String type) {
HashSet<String> defaultValue = new HashSet<>();
defaultValue.add("images");
Set<String> allowed = prefs.getStringSet(PREF_MOBILE_UPDATE, defaultValue);
return allowed.contains(type);
}
public static boolean isAllowMobileFeedRefresh() {
return isAllowMobileFor("feed_refresh");
}
public static boolean isAllowMobileEpisodeDownload() {
return isAllowMobileFor("episode_download");
}
public static boolean isAllowMobileAutoDownload() {
return isAllowMobileFor("auto_download");
}
public static boolean isAllowMobileStreaming() {
return isAllowMobileFor("streaming");
}
public static boolean isAllowMobileImages() {
return isAllowMobileFor("images");
}
private static void setAllowMobileFor(String type, boolean allow) {
HashSet<String> defaultValue = new HashSet<>();
defaultValue.add("images");
final Set<String> getValueStringSet = prefs.getStringSet(PREF_MOBILE_UPDATE, defaultValue);
final Set<String> allowed = new HashSet<>(getValueStringSet);
if (allow) {
allowed.add(type);
} else {
allowed.remove(type);
}
prefs.edit().putStringSet(PREF_MOBILE_UPDATE, allowed).apply();
}
public static void setAllowMobileFeedRefresh(boolean allow) {
setAllowMobileFor("feed_refresh", allow);
}
public static void setAllowMobileEpisodeDownload(boolean allow) {
setAllowMobileFor("episode_download", allow);
}
public static void setAllowMobileAutoDownload(boolean allow) {
setAllowMobileFor("auto_download", allow);
}
public static void setAllowMobileStreaming(boolean allow) {
setAllowMobileFor("streaming", allow);
}
public static void setAllowMobileImages(boolean allow) {
setAllowMobileFor("images", allow);
}
public static int getParallelDownloads() {
return Integer.parseInt(prefs.getString(PREF_PARALLEL_DOWNLOADS, "4"));
}
public static int getEpisodeCacheSizeUnlimited() {
return context.getResources().getInteger(R.integer.episode_cache_size_unlimited);
}
/**
* Returns the capacity of the episode cache. This method will return the
* negative integer EPISODE_CACHE_SIZE_UNLIMITED if the cache size is set to
* 'unlimited'.
*/
public static int getEpisodeCacheSize() {
return readEpisodeCacheSizeInternal(prefs.getString(PREF_EPISODE_CACHE_SIZE, "20"));
}
public static boolean isEnableAutodownload() {
return prefs.getBoolean(PREF_ENABLE_AUTODL, false);
}
@VisibleForTesting
public static void setEnableAutodownload(boolean enabled) {
prefs.edit().putBoolean(PREF_ENABLE_AUTODL, enabled).apply();
}
public static boolean isEnableAutodownloadOnBattery() {
return prefs.getBoolean(PREF_ENABLE_AUTODL_ON_BATTERY, true);
}
public static boolean isEnableAutodownloadWifiFilter() {
return Build.VERSION.SDK_INT < 29 && prefs.getBoolean(PREF_ENABLE_AUTODL_WIFI_FILTER, false);
}
public static int getFastForwardSecs() {
return prefs.getInt(PREF_FAST_FORWARD_SECS, 30);
}
public static int getRewindSecs() {
return prefs.getInt(PREF_REWIND_SECS, 10);
}
public static String[] getAutodownloadSelectedNetworks() {
String selectedNetWorks = prefs.getString(PREF_AUTODL_SELECTED_NETWORKS, "");
return TextUtils.split(selectedNetWorks, ",");
}
public static void setProxyConfig(ProxyConfig config) {
SharedPreferences.Editor editor = prefs.edit();
editor.putString(PREF_PROXY_TYPE, config.type.name());
if(TextUtils.isEmpty(config.host)) {
editor.remove(PREF_PROXY_HOST);
} else {
editor.putString(PREF_PROXY_HOST, config.host);
}
if(config.port <= 0 || config.port > 65535) {
editor.remove(PREF_PROXY_PORT);
} else {
editor.putInt(PREF_PROXY_PORT, config.port);
}
if(TextUtils.isEmpty(config.username)) {
editor.remove(PREF_PROXY_USER);
} else {
editor.putString(PREF_PROXY_USER, config.username);
}
if(TextUtils.isEmpty(config.password)) {
editor.remove(PREF_PROXY_PASSWORD);
} else {
editor.putString(PREF_PROXY_PASSWORD, config.password);
}
editor.apply();
}
public static ProxyConfig getProxyConfig() {
Proxy.Type type = Proxy.Type.valueOf(prefs.getString(PREF_PROXY_TYPE, Proxy.Type.DIRECT.name()));
String host = prefs.getString(PREF_PROXY_HOST, null);
int port = prefs.getInt(PREF_PROXY_PORT, 0);
String username = prefs.getString(PREF_PROXY_USER, null);
String password = prefs.getString(PREF_PROXY_PASSWORD, null);
return new ProxyConfig(type, host, port, username, password);
}
public static boolean shouldResumeAfterCall() {
return prefs.getBoolean(PREF_RESUME_AFTER_CALL, true);
}
public static boolean isQueueLocked() {
return prefs.getBoolean(PREF_QUEUE_LOCKED, false);
}
public static void setFastForwardSecs(int secs) {
prefs.edit()
.putInt(PREF_FAST_FORWARD_SECS, secs)
.apply();
}
public static void setRewindSecs(int secs) {
prefs.edit()
.putInt(PREF_REWIND_SECS, secs)
.apply();
}
/**
 * Persists the audio playback speed. Stored as a string, matching how the
 * preference is read elsewhere.
 *
 * @param speed Playback speed multiplier.
 */
public static void setPlaybackSpeed(float speed) {
    SharedPreferences.Editor editor = prefs.edit();
    editor.putString(PREF_PLAYBACK_SPEED, String.valueOf(speed));
    editor.apply();
}
/**
 * Persists the video playback speed. Stored as a string, matching how the
 * preference is read elsewhere.
 *
 * @param speed Playback speed multiplier.
 */
public static void setVideoPlaybackSpeed(float speed) {
    SharedPreferences.Editor editor = prefs.edit();
    editor.putString(PREF_VIDEO_PLAYBACK_SPEED, String.valueOf(speed));
    editor.apply();
}
/**
 * Persists whether silent passages should be skipped during playback.
 *
 * @param skipSilence True to enable silence skipping.
 */
public static void setSkipSilence(boolean skipSilence) {
    SharedPreferences.Editor editor = prefs.edit();
    editor.putBoolean(PREF_PLAYBACK_SKIP_SILENCE, skipSilence);
    editor.apply();
}
/**
 * Persists the list of selectable playback speeds as a JSON array of strings,
 * each formatted with two decimals and a '.' separator (Locale.US) so the
 * stored value is locale-independent.
 *
 * @param speeds Playback speed multipliers to store.
 */
public static void setPlaybackSpeedArray(List<Float> speeds) {
    DecimalFormatSymbols symbols = new DecimalFormatSymbols(Locale.US);
    symbols.setDecimalSeparator('.');
    DecimalFormat formatter = new DecimalFormat("0.00", symbols);
    JSONArray serialized = new JSONArray();
    for (float speed : speeds) {
        serialized.put(formatter.format(speed));
    }
    prefs.edit().putString(PREF_PLAYBACK_SPEED_ARRAY, serialized.toString()).apply();
}
/**
 * Persists the networks selected for automatic downloads as a single
 * comma-separated string.
 *
 * @param value Network identifiers to store.
 */
public static void setAutodownloadSelectedNetworks(String[] value) {
    final String joined = TextUtils.join(",", value);
    prefs.edit().putString(PREF_AUTODL_SELECTED_NETWORKS, joined).apply();
}
/**
 * Sets the feed update interval.
 *
 * @param hours Interval between automatic updates, in hours.
 */
public static void setUpdateInterval(long hours) {
    final String serialized = String.valueOf(hours);
    prefs.edit().putString(PREF_UPDATE_INTERVAL, serialized).apply();
    // When updating with an interval, we assume the user wants to update
    // *now* and then every 'hours' interval thereafter.
    AutoUpdateManager.restartUpdateAlarm(context);
}
/**
 * Sets a fixed time of day for automatic feed updates, stored as
 * "hour:minute" under the same PREF_UPDATE_INTERVAL key used for the
 * interval mode, then reschedules the update alarm.
 * (Previous javadoc was copy-pasted from setUpdateInterval and wrong.)
 *
 * @param hourOfDay Hour of day (presumably 0-23 — set by caller; not validated here).
 * @param minute    Minute of the hour (presumably 0-59; not validated here).
 */
public static void setUpdateTimeOfDay(int hourOfDay, int minute) {
    prefs.edit()
            .putString(PREF_UPDATE_INTERVAL, hourOfDay + ":" + minute)
            .apply();
    AutoUpdateManager.restartUpdateAlarm(context);
}
/**
 * Disables automatic feed updates by storing an interval of "0" and
 * cancelling the scheduled update alarm.
 *
 * @param context Context used to cancel the alarm.
 */
public static void disableAutoUpdate(Context context) {
    prefs.edit().putString(PREF_UPDATE_INTERVAL, "0").apply();
    AutoUpdateManager.disableAutoUpdate(context);
}
/**
 * @return Whether gpodder.net error notifications should be shown. On
 *         Android O (API 26) and newer this always returns true because the
 *         system's notification channels control visibility instead.
 */
public static boolean gpodnetNotificationsEnabled() {
    // From API 26 on, notification preferences are handled by the system.
    return Build.VERSION.SDK_INT >= 26
            || prefs.getBoolean(PREF_GPODNET_NOTIFICATIONS, true);
}
/**
 * Returns the stored preference value without the API-level override applied
 * by {@code gpodnetNotificationsEnabled()}. Used for migrating the setting
 * to system notification channels.
 */
public static boolean getGpodnetNotificationsEnabledRaw() {
    final boolean raw = prefs.getBoolean(PREF_GPODNET_NOTIFICATIONS, true);
    return raw;
}
/**
 * Re-enables gpodder.net notifications (the only value this setter writes is true).
 */
public static void setGpodnetNotificationsEnabled() {
    SharedPreferences.Editor editor = prefs.edit();
    editor.putBoolean(PREF_GPODNET_NOTIFICATIONS, true);
    editor.apply();
}
/**
 * Persists the set of navigation-drawer items the user chose to hide,
 * serialized as a comma-separated string.
 *
 * @param items Identifiers of the hidden drawer items.
 */
public static void setHiddenDrawerItems(List<String> items) {
    prefs.edit()
            .putString(PREF_HIDDEN_DRAWER_ITEMS, TextUtils.join(",", items))
            .apply();
}
/**
 * Persists which buttons appear in the compact media notification,
 * serialized as a comma-separated string of button ids.
 *
 * @param items Button identifiers to show.
 */
public static void setCompactNotificationButtons(List<Integer> items) {
    prefs.edit()
            .putString(PREF_COMPACT_NOTIFICATION_BUTTONS, TextUtils.join(",", items))
            .apply();
}
/**
 * Persists whether the queue is locked against reordering.
 *
 * @param locked True to lock the queue.
 */
public static void setQueueLocked(boolean locked) {
    SharedPreferences.Editor editor = prefs.edit();
    editor.putBoolean(PREF_QUEUE_LOCKED, locked);
    editor.apply();
}
/**
 * Maps the stored theme preference ("0"/"1"/"2") to a theme resource.
 * Any other value falls back to the system night-mode setting.
 *
 * @param valueFromPrefs Raw preference string.
 * @return A Theme_AntennaPod_* style resource id.
 */
private static int readThemeValue(String valueFromPrefs) {
    switch (valueFromPrefs) {
        case "0":
            return R.style.Theme_AntennaPod_Light;
        case "1":
            return R.style.Theme_AntennaPod_Dark;
        case "2":
            return R.style.Theme_AntennaPod_TrueBlack;
        default:
            break;
    }
    // No explicit choice stored: follow the system's night mode.
    final int uiMode = context.getResources().getConfiguration().uiMode;
    final boolean nightModeActive =
            (uiMode & Configuration.UI_MODE_NIGHT_MASK) == Configuration.UI_MODE_NIGHT_YES;
    return nightModeActive ? R.style.Theme_AntennaPod_Dark : R.style.Theme_AntennaPod_Light;
}
/**
 * Converts a stored update interval (whole hours as a decimal string) to
 * milliseconds.
 * NOTE(review): assumes the preference holds a plain integer. A "HH:mm"
 * time-of-day value is stored under the same key by setUpdateTimeOfDay and
 * would make parseInt throw — confirm callers check isAutoUpdateTimeOfDay()
 * first.
 *
 * @param valueFromPrefs Raw preference string, e.g. "12".
 * @return The interval in milliseconds.
 */
private static long readUpdateInterval(String valueFromPrefs) {
    return TimeUnit.HOURS.toMillis(Integer.parseInt(valueFromPrefs));
}
/**
 * Parses the stored episode cache size. The localized "unlimited" marker
 * string maps to EPISODE_CACHE_SIZE_UNLIMITED; anything else is parsed as an
 * integer episode count.
 *
 * @param valueFromPrefs Raw preference string.
 * @return The cache size, or EPISODE_CACHE_SIZE_UNLIMITED.
 */
private static int readEpisodeCacheSizeInternal(String valueFromPrefs) {
    final String unlimitedMarker = context.getString(R.string.pref_episode_cache_unlimited);
    return valueFromPrefs.equals(unlimitedMarker)
            ? EPISODE_CACHE_SIZE_UNLIMITED
            : Integer.parseInt(valueFromPrefs);
}
/**
 * Parses the stored JSON array of selectable playback speeds.
 *
 * @param valueFromPrefs Raw preference value; may be null if never set.
 * @return The stored speeds, or the defaults (1.0, 1.25, 1.5) when the
 *         preference is unset or cannot be parsed.
 */
private static List<Float> readPlaybackSpeedArray(String valueFromPrefs) {
    if (valueFromPrefs != null) {
        try {
            JSONArray jsonArray = new JSONArray(valueFromPrefs);
            List<Float> selectedSpeeds = new ArrayList<>(jsonArray.length());
            for (int i = 0; i < jsonArray.length(); i++) {
                selectedSpeeds.add((float) jsonArray.getDouble(i));
            }
            return selectedSpeeds;
        } catch (JSONException e) {
            // Attach the throwable to the log call instead of printStackTrace():
            // stderr output is easily lost on Android, logcat keeps the trace.
            Log.e(TAG, "Got JSON error when trying to get speeds from JSONArray", e);
        }
    }
    // If this preference hasn't been set yet (or was corrupt), return the default options
    return Arrays.asList(1.0f, 1.25f, 1.5f);
}
/**
 * @return The configured media player backend id; defaults to ExoPlayer.
 */
public static String getMediaPlayer() {
    final String player = prefs.getString(PREF_MEDIA_PLAYER, PREF_MEDIA_PLAYER_EXOPLAYER);
    return player;
}
/**
 * @return True when the Sonic media player backend is selected.
 */
public static boolean useSonic() {
    return "sonic".equals(getMediaPlayer());
}
/**
 * @return True when the ExoPlayer media player backend is selected.
 */
public static boolean useExoplayer() {
    return PREF_MEDIA_PLAYER_EXOPLAYER.equals(getMediaPlayer());
}
/** Selects the Sonic media player backend. */
public static void enableSonic() {
    SharedPreferences.Editor editor = prefs.edit();
    editor.putString(PREF_MEDIA_PLAYER, "sonic");
    editor.apply();
}
/** Selects the ExoPlayer media player backend. */
public static void enableExoplayer() {
    SharedPreferences.Editor editor = prefs.edit();
    editor.putString(PREF_MEDIA_PLAYER, PREF_MEDIA_PLAYER_EXOPLAYER);
    editor.apply();
}
/**
 * @return Whether stereo audio should be down-mixed to mono; defaults to false.
 */
public static boolean stereoToMono() {
    final boolean downmix = prefs.getBoolean(PREF_STEREO_TO_MONO, false);
    return downmix;
}
/**
 * Persists whether stereo audio should be down-mixed to mono.
 *
 * @param enable True to enable the down-mix.
 */
public static void stereoToMono(boolean enable) {
    SharedPreferences.Editor editor = prefs.edit();
    editor.putBoolean(PREF_STEREO_TO_MONO, enable);
    editor.apply();
}
/**
 * @return What video playback should do when the app leaves the foreground;
 *         "pip" (picture-in-picture) is both the stored default and the
 *         fallback for unknown values.
 */
public static VideoBackgroundBehavior getVideoBackgroundBehavior() {
    final String stored = prefs.getString(PREF_VIDEO_BEHAVIOR, "pip");
    if ("stop".equals(stored)) {
        return VideoBackgroundBehavior.STOP;
    }
    if ("continue".equals(stored)) {
        return VideoBackgroundBehavior.CONTINUE_PLAYING;
    }
    // "pip" and any unrecognized value fall back to picture-in-picture.
    return VideoBackgroundBehavior.PICTURE_IN_PICTURE;
}
/**
 * Picks the episode cleanup strategy matching the stored cleanup value.
 * When auto-download is disabled, no cleanup is performed at all
 * (APNullCleanupAlgorithm), regardless of the stored value.
 *
 * @return The cleanup algorithm to run.
 */
public static EpisodeCleanupAlgorithm getEpisodeCleanupAlgorithm() {
    if (!isEnableAutodownload()) {
        return new APNullCleanupAlgorithm();
    }
    final int cleanupValue = getEpisodeCleanupValue();
    if (cleanupValue == EPISODE_CLEANUP_EXCEPT_FAVORITE) {
        return new ExceptFavoriteCleanupAlgorithm();
    }
    if (cleanupValue == EPISODE_CLEANUP_QUEUE) {
        return new APQueueCleanupAlgorithm();
    }
    if (cleanupValue == EPISODE_CLEANUP_NULL) {
        return new APNullCleanupAlgorithm();
    }
    // Any other value is interpreted as a retention period for APCleanupAlgorithm.
    return new APCleanupAlgorithm(cleanupValue);
}
/**
 * @return The stored episode cleanup value; defaults to EPISODE_CLEANUP_NULL.
 */
public static int getEpisodeCleanupValue() {
    final String stored = prefs.getString(PREF_EPISODE_CLEANUP, "" + EPISODE_CLEANUP_NULL);
    return Integer.parseInt(stored);
}
/**
 * Persists the episode cleanup value (stored as a string).
 *
 * @param episodeCleanupValue One of the EPISODE_CLEANUP_* constants or a
 *                            retention period.
 */
public static void setEpisodeCleanupValue(int episodeCleanupValue) {
    SharedPreferences.Editor editor = prefs.edit();
    editor.putString(PREF_EPISODE_CLEANUP, Integer.toString(episodeCleanupValue));
    editor.apply();
}
/**
 * Return the folder where the app stores all of its data. This method will
 * return the standard data folder if none has been set by the user.
 *
 * @param type The name of the folder inside the data folder. May be null
 *             when accessing the root of the data folder.
 * @return The data folder that has been requested or null if the folder
 * could not be created.
 */
public static File getDataFolder(String type) {
    String strDir = prefs.getString(PREF_DATA_FOLDER, null);
    if (strDir == null) {
        // No custom folder configured: use the app's external files dir.
        Log.d(TAG, "Using default data folder");
        return context.getExternalFilesDir(type);
    } else {
        File dataDir = new File(strDir);
        if (!dataDir.exists()) {
            // NOTE(review): mkdir() (not mkdirs()) — creation fails if the
            // parent of the configured folder does not exist; confirm whether
            // that is intentional.
            if (!dataDir.mkdir()) {
                Log.w(TAG, "Could not create data folder");
                return null;
            }
        }
        if (type == null) {
            return dataDir;
        } else {
            // handle path separators
            String[] dirs = type.split("/");
            for (int i = 0; i < dirs.length; i++) {
                // NOTE(review): dirs.length > 0 is always true inside this
                // loop; presumably a leftover guard.
                if (dirs.length > 0) {
                    if (i < dirs.length - 1) {
                        // Recursively ensure each intermediate component
                        // exists; the recursive call also updates dataDir to
                        // the intermediate folder just created/verified.
                        dataDir = getDataFolder(dirs[i]);
                        if (dataDir == null) {
                            return null;
                        }
                    }
                    // After the loop, 'type' holds the last path component.
                    type = dirs[i];
                }
            }
            File typeDir = new File(dataDir, type);
            if (!typeDir.exists()) {
                // Only attempt creation when the parent is writable; an
                // unwritable parent silently returns the non-existent dir.
                if (dataDir.canWrite()) {
                    if (!typeDir.mkdir()) {
                        Log.e(TAG, "Could not create data folder named " + type);
                        return null;
                    }
                }
            }
            return typeDir;
        }
    }
}
/**
 * Persists a custom data folder path.
 *
 * @param dir Absolute path of the folder to use for app data.
 */
public static void setDataFolder(String dir) {
    Log.d(TAG, "setDataFolder(dir: " + dir + ")");
    SharedPreferences.Editor editor = prefs.edit();
    editor.putString(PREF_DATA_FOLDER, dir);
    editor.apply();
}
/**
 * Create a .nomedia file in the app's external files directory to prevent
 * scanning by the media scanner. Does nothing if the file already exists.
 */
private static void createNoMediaFile() {
    File f = new File(context.getExternalFilesDir(null), ".nomedia");
    if (f.exists()) {
        return;
    }
    try {
        // Only log success when the file was actually created; the original
        // logged ".nomedia file created" even after a failure or exception.
        if (f.createNewFile()) {
            Log.d(TAG, ".nomedia file created");
        } else {
            Log.e(TAG, "Could not create .nomedia file");
        }
    } catch (IOException e) {
        // Attach the throwable so logcat keeps the stack trace (instead of
        // printStackTrace() to stderr, which is easily lost on Android).
        Log.e(TAG, "Could not create .nomedia file", e);
    }
}
/**
 * @return true if auto update is set to a specific time of day
 *         (getUpdateTimeOfDay() yields an {hour, minute} pair);
 *         false if auto update is set to an interval.
 */
public static boolean isAutoUpdateTimeOfDay() {
    return getUpdateTimeOfDay().length == 2;
}
/**
 * Evaluates whether Cast support (Chromecast, Audio Cast, etc) is enabled
 * in the preferences; defaults to false.
 */
public static boolean isCastEnabled() {
    final boolean enabled = prefs.getBoolean(PREF_CAST_ENABLED, false);
    return enabled;
}
/**
 * What video playback does when the app goes to the background
 * (see getVideoBackgroundBehavior()). Do not reorder: ordinals may matter
 * to callers — TODO confirm.
 */
public enum VideoBackgroundBehavior {
    STOP, PICTURE_IN_PICTURE, CONTINUE_PLAYING
}
/**
 * How the hardware/system back button behaves
 * (see getBackButtonBehavior()). Do not reorder: ordinals may matter
 * to callers — TODO confirm.
 */
public enum BackButtonBehavior {
    DEFAULT, OPEN_DRAWER, DOUBLE_TAP, SHOW_PROMPT, GO_TO_PAGE
}
/**
 * @return The configured back-button behavior; "default" is both the stored
 *         default and the fallback for unknown values.
 */
public static BackButtonBehavior getBackButtonBehavior() {
    final String stored = prefs.getString(PREF_BACK_BUTTON_BEHAVIOR, "default");
    if ("drawer".equals(stored)) {
        return BackButtonBehavior.OPEN_DRAWER;
    }
    if ("doubletap".equals(stored)) {
        return BackButtonBehavior.DOUBLE_TAP;
    }
    if ("prompt".equals(stored)) {
        return BackButtonBehavior.SHOW_PROMPT;
    }
    if ("page".equals(stored)) {
        return BackButtonBehavior.GO_TO_PAGE;
    }
    // "default" and any unrecognized value.
    return BackButtonBehavior.DEFAULT;
}
/**
 * @return The fragment tag the back button navigates to when behavior is
 *         GO_TO_PAGE; defaults to "QueueFragment".
 */
public static String getBackButtonGoToPage() {
    final String page = prefs.getString(PREF_BACK_BUTTON_GO_TO_PAGE, "QueueFragment");
    return page;
}
/**
 * Persists the page the back button navigates to in GO_TO_PAGE mode.
 *
 * @param tag Fragment tag of the destination page.
 */
public static void setBackButtonGoToPage(String tag) {
    SharedPreferences.Editor editor = prefs.edit();
    editor.putString(PREF_BACK_BUTTON_GO_TO_PAGE, tag);
    editor.apply();
}
/**
 * @return Whether displayed times account for the playback speed; defaults to false.
 */
public static boolean timeRespectsSpeed() {
    final boolean respects = prefs.getBoolean(PREF_TIME_RESPECTS_SPEED, false);
    return respects;
}
/**
 * @return Whether streaming is preferred over downloading; defaults to false.
 */
public static boolean isStreamOverDownload() {
    final boolean stream = prefs.getBoolean(PREF_STREAM_OVER_DOWNLOAD, false);
    return stream;
}
/**
 * Persists whether streaming is preferred over downloading.
 *
 * @param stream True to prefer streaming.
 */
public static void setStreamOverDownload(boolean stream) {
    SharedPreferences.Editor editor = prefs.edit();
    editor.putBoolean(PREF_STREAM_OVER_DOWNLOAD, stream);
    editor.apply();
}
/**
 * Returns if the queue is in keep sorted mode; defaults to false.
 *
 * @see #getQueueKeepSortedOrder()
 */
public static boolean isQueueKeepSorted() {
    final boolean keepSorted = prefs.getBoolean(PREF_QUEUE_KEEP_SORTED, false);
    return keepSorted;
}
/**
 * Enables/disables the keep sorted mode of the queue.
 *
 * @param keepSorted True to keep the queue sorted automatically.
 * @see #setQueueKeepSortedOrder(SortOrder)
 */
public static void setQueueKeepSorted(boolean keepSorted) {
    SharedPreferences.Editor editor = prefs.edit();
    editor.putBoolean(PREF_QUEUE_KEEP_SORTED, keepSorted);
    editor.apply();
}
/**
 * Returns the sort order for the queue keep sorted mode.
 * Note: This value is stored independently from the keep sorted state, so it
 * survives toggling the mode off and on.
 *
 * @return The stored order, or DATE_NEW_OLD when unset/unparsable.
 * @see #isQueueKeepSorted()
 */
public static SortOrder getQueueKeepSortedOrder() {
    final String stored = prefs.getString(PREF_QUEUE_KEEP_SORTED_ORDER, "use-default");
    return SortOrder.parseWithDefault(stored, SortOrder.DATE_NEW_OLD);
}
/**
 * Sets the sort order for the queue keep sorted mode. A null order is
 * ignored and leaves the stored value unchanged.
 *
 * @param sortOrder The order to store, or null to do nothing.
 * @see #setQueueKeepSorted(boolean)
 */
public static void setQueueKeepSortedOrder(SortOrder sortOrder) {
    if (sortOrder == null) {
        return;
    }
    SharedPreferences.Editor editor = prefs.edit();
    editor.putString(PREF_QUEUE_KEEP_SORTED_ORDER, sortOrder.name());
    editor.apply();
}
/**
 * @return The subscriptions filter deserialized from its stored string form
 *         (empty string when no filter is set).
 */
public static SubscriptionsFilter getSubscriptionsFilter() {
    final String serialized = prefs.getString(PREF_FILTER_FEED, "");
    return new SubscriptionsFilter(serialized);
}
/**
 * Persists the subscriptions filter in its serialized string form.
 *
 * @param value The filter to store.
 */
public static void setSubscriptionsFilter(SubscriptionsFilter value) {
    SharedPreferences.Editor editor = prefs.edit();
    editor.putString(PREF_FILTER_FEED, value.serialize());
    editor.apply();
}
/**
 * @return The usage-counting start date in epoch milliseconds, or -1 when unset.
 */
public static long getUsageCountingDateMillis() {
    final long millis = prefs.getLong(PREF_USAGE_COUNTING_DATE, -1);
    return millis;
}
/**
 * Persists the usage-counting start date.
 *
 * @param value Epoch milliseconds, or -1 to mark it unset.
 */
private static void setUsageCountingDateMillis(long value) {
    SharedPreferences.Editor editor = prefs.edit();
    editor.putLong(PREF_USAGE_COUNTING_DATE, value);
    editor.apply();
}
/**
 * Resets the usage-counting start date to the current wall-clock time.
 * Uses System.currentTimeMillis() directly instead of allocating a Calendar
 * (Calendar.getInstance().getTimeInMillis() returns the same value).
 */
public static void resetUsageCountingDate() {
    setUsageCountingDateMillis(System.currentTimeMillis());
}
/** Clears the usage-counting start date (-1 is the "unset" sentinel). */
public static void unsetUsageCountingDate() {
    setUsageCountingDateMillis(-1);
}
}
| 1 | 20,951 | Is it really necessary to store them in a new setting? I think it should be enough to store the credentials in `ProxyConfig.direct()`. Then AntennaPod does not need to store multiple different proxy settings. Also, it will not change existing users' settings (currently the proxy settings would be lost on upgrade). | AntennaPod-AntennaPod | java |
@@ -2115,10 +2115,15 @@ namespace pwiz.Skyline.Model
public static double GetCollisionEnergy(SrmSettings settings, PeptideDocNode nodePep,
TransitionGroupDocNode nodeGroup, TransitionDocNode nodeTran, CollisionEnergyRegression regression, int step)
{
- var ce = nodeTran==null // If we're only given a precursor, use the explicit CE of its children if they all agree
- ? (nodeGroup.Children.Any() && nodeGroup.Children.All( node => ((TransitionDocNode)node).ExplicitValues.CollisionEnergy == ((TransitionDocNode)nodeGroup.Children.First()).ExplicitValues.CollisionEnergy)
- ? ((TransitionDocNode)nodeGroup.Children.First()).ExplicitValues.CollisionEnergy : null)
- : nodeTran.ExplicitValues.CollisionEnergy;
+ // Collision Energy explicitly declared at the transition level is taken to be the correct value.
+ // If that is not present, explicitly declared value at the precursor level is used.
+ // If that is not present, the CE is calculated using the provided regression if any.
+ var ce = nodeTran == null ?
+ // If we're only given a precursor, use the explicit CE of its children if they all agree, else use precursor explicit CE
+ (nodeGroup.Children.Select(node => ((TransitionDocNode)node).ExplicitValues.CollisionEnergy).Distinct().Count() == 1 ?
+ ((TransitionDocNode)nodeGroup.Children.First()).ExplicitValues.CollisionEnergy : null) ?? nodeGroup.ExplicitValues.CollisionEnergy :
+ // Otherwise use explicit precursor CE if no explicit transition CE given
+ nodeTran.ExplicitValues.CollisionEnergy ?? nodeGroup.ExplicitValues.CollisionEnergy;
if (regression != null)
{
if (!ce.HasValue) | 1 | /*
* Original author: Brendan MacLean <brendanx .at. u.washington.edu>,
* MacCoss Lab, Department of Genome Sciences, UW
*
* Copyright 2009 University of Washington - Seattle, WA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//TODO transitions and transition group
// Move stuff to refinement
// Note to those extending the document model:
// All objects participating in the document model must be immutable.
// An immutable document has many advantages, primary among those are
// simplifying synchronization in a multi-threaded system, allowing
// eventual consistency, and maintaining a history of entire documents
// at a cost of only the depth of the change in the document tree, with
// many documents sharing the majority of their in memory objects. This
// allows undo/redo that simply points to a document in the history.
//
// Simple immutable objects may have only a constructor and property
// getters with private setters. More complex objects should derive
// from the class Immutable. The should still have only property getters
// and private setters, and should only change at 3 times:
// 1. In a constructor
// 2. During deserialization (immediately after construction)
// 3. In a Change<property>() method, using ImClone()
// Directly modifying an existing object in memory after it has been
// fully constructed, will break undo/redo, since the object may be
// referenced by many documents in the history.
//
// More complex objects should also consider implementing IValidating
// to ensure that the class remains valid in all three of the cases
// described above.
using System;
using System.Collections.Generic;
using System.Linq;
using System.IO;
using System.Text;
using System.Threading;
using System.Xml;
using System.Xml.Schema;
using System.Xml.Serialization;
using pwiz.Common.SystemUtil;
using pwiz.Skyline.Controls.SeqNode;
using pwiz.Skyline.Model.AuditLog;
using pwiz.Skyline.Model.DocSettings;
using pwiz.Skyline.Model.DocSettings.Extensions;
using pwiz.Skyline.Model.Find;
using pwiz.Skyline.Model.IonMobility;
using pwiz.Skyline.Model.Irt;
using pwiz.Skyline.Model.Lib;
using pwiz.Skyline.Model.Optimization;
using pwiz.Skyline.Model.Proteome;
using pwiz.Skyline.Model.Results.Scoring;
using pwiz.Skyline.Model.RetentionTimes;
using pwiz.Skyline.Properties;
using pwiz.Skyline.Util;
using pwiz.Skyline.Model.Results;
using pwiz.Skyline.Model.Serialization;
using pwiz.Skyline.Util.Extensions;
namespace pwiz.Skyline.Model
{
/// <summary>
/// Exposes get/set methods for the container of an immutable <see cref="SrmDocument"/>.
/// </summary>
public interface IDocumentContainer
{
/// <summary>
/// Get the current contained document.
/// </summary>
SrmDocument Document { get; }
/// <summary>
/// Get the path to the current save location of the document, or
/// null if the document is not saved.
/// </summary>
string DocumentFilePath { get; }
/// <summary>
/// Set the contained document, if the original document is the same as the
/// current document.
/// </summary>
/// <param name="docNew">A new version of the current document</param>
/// <param name="docOriginal">The version from which the new version was derived</param>
/// <returns>True if the document was successfully set</returns>
bool SetDocument(SrmDocument docNew, SrmDocument docOriginal);
/// <summary>
/// Adds an event handler to the container's document changed event. The
/// event handler must be thread safe, as it may be called on any thread.
/// </summary>
/// <param name="listener">The event handler to add</param>
void Listen(EventHandler<DocumentChangedEventArgs> listener);
/// <summary>
/// Removes an event handler from the container's document changed event.
/// </summary>
/// <param name="listener">The event handler to remove</param>
void Unlisten(EventHandler<DocumentChangedEventArgs> listener);
/// <summary>
/// Returns true if the container is in teardown, and we should not attempt any document changes.
/// </summary>
bool IsClosing { get; }
/// <summary>
/// Tracking active background loaders for a container - helps in test harness SkylineWindow teardown
/// </summary>
IEnumerable<BackgroundLoader> BackgroundLoaders { get; }
void AddBackgroundLoader(BackgroundLoader loader);
void RemoveBackgroundLoader(BackgroundLoader loader);
}
/// <summary>
/// Exposes get and event registering methods for the container of an
/// immutable <see cref="SrmDocument"/>, for use in UI thread components.
/// </summary>
public interface IDocumentUIContainer : IDocumentContainer
{
/// <summary>
/// Get the current document for display in the UI.
/// </summary>
SrmDocument DocumentUI { get; }
/// <summary>
/// Adds an event handler to the container's document UI changed event. The
/// event handler must be thread safe, as it may be called on any thread.
/// </summary>
/// <param name="listener">The event handler to add</param>
void ListenUI(EventHandler<DocumentChangedEventArgs> listener);
/// <summary>
/// Removes an event handler from the container's document UI changed event.
/// </summary>
/// <param name="listener">The event handler to remove</param>
void UnlistenUI(EventHandler<DocumentChangedEventArgs> listener);
/// <summary>
/// Returns focus to the main document UI
/// </summary>
void FocusDocument();
/// <summary>
/// True if the UI is in the middle of an undo/redo operation
/// </summary>
bool InUndoRedo { get; }
}
/// <summary>
/// EventArgs supplied with the <see cref="SkylineWindow.DocumentUIChangedEvent"/>.
/// The previous document is supplied to allow localized modifications based
/// on a diff between the two documents.
/// </summary>
public class DocumentChangedEventArgs : EventArgs
{
public DocumentChangedEventArgs(SrmDocument documentPrevious, bool isOpeningFile = false, bool inSelUpdateLock = false)
{
DocumentPrevious = documentPrevious;
IsInSelUpdateLock = inSelUpdateLock;
IsOpeningFile = isOpeningFile;
}
public SrmDocument DocumentPrevious { get; private set; }
/// <summary>
/// True when SequenceTree.IsInUpdateLock is set, which means the selection
/// cannot be trusted as reflecting the current document.
/// </summary>
public bool IsInSelUpdateLock { get; private set; }
/// <summary>
/// True when the document change is caused by opening a file
/// </summary>
public bool IsOpeningFile { get; private set; }
}
/// <summary>
/// Root <see cref="Identity"/> class for a document.
/// </summary>
public class SrmDocumentId : Identity
{
}
/// <summary>
/// The <see cref="SrmDocument"/> class and all of the model objects it includes
/// are entirely immutable. This means a reference to document is always entirely
/// complete, and requires no synchronization to use with multiple threads.
/// <para>
/// All changes produce a new document with an incremented <see cref="RevisionIndex"/>.
/// On first consideration of this model, it may sound incredibly expensive, but,
/// in fact, the model is used in source code control systems like Subversion,
/// where changing a single file produces a new repository revision without
/// necessarily copying every file in the tree. Only the path from the new
/// immutable child to the root need be modified.
/// </para><para>
/// The model for modifying a document within a multi-threaded system, then
/// becomes:
/// <list type="number">
/// <item><description>Acquire a reference to the current document</description></item>
/// <item><description>Create a modified document based on the revision acquired</description></item>
/// <item><description>Use <see cref="Interlocked.CompareExchange(ref object,object,object)"/>
/// to set the master reference, if it is still equal to the one acquired</description></item>
/// <item><description>If the attempt to set fails, return to the first step.</description></item>
/// </list>
/// </para><para>
/// This also allows the undo/redo stacks to become a simple history of documents,
/// rather than a record of actions taken to modify a mutable document.
/// </para>
/// </summary>
[XmlRoot(@"srm_settings")]
public class SrmDocument : DocNodeParent, IXmlSerializable
{
/// <summary>
/// Document extension on disk
/// </summary>
public const string EXT = ".sky";
public static string FILTER_DOC
{
get { return TextUtil.FileDialogFilter(Resources.SrmDocument_FILTER_DOC_Skyline_Documents, EXT); }
}
public static string FILTER_DOC_AND_SKY_ZIP
{
// Used only in the open file dialog.
get
{
return TextUtil.FileDialogFilter(Resources.SrmDocument_FILTER_DOC_AND_SKY_ZIP_Skyline_Files, EXT,
SrmDocumentSharing.EXT_SKY_ZIP, SkypFile.EXT);
}
}
public static readonly DocumentFormat FORMAT_VERSION = DocumentFormat.CURRENT;
public const int MAX_PEPTIDE_COUNT = 200 * 1000;
public const int MAX_TRANSITION_COUNT = 5 * 1000 * 1000;
public static int _maxTransitionCount = Install.Is64Bit ? MAX_TRANSITION_COUNT : MAX_TRANSITION_COUNT/5; // To keep from running out of memory on 32-bit
public static int MaxTransitionCount
{
get { return _maxTransitionCount; }
}
/// <summary>
/// For testing to avoid needing to create 5,000,000 transitions to test transition count limits
/// </summary>
public static void SetTestMaxTransitonCount(int max)
{
_maxTransitionCount = max;
}
// Version of this document in deserialized XML
public SrmDocument(SrmSettings settings)
: base(new SrmDocumentId(), Annotations.EMPTY, new PeptideGroupDocNode[0], false)
{
FormatVersion = FORMAT_VERSION;
Settings = settings;
AuditLog = new AuditLogList();
SetDocumentType(); // Note proteomics vs molecule vs mixed (as we're empty, will be set to none)
}
private SrmDocument(SrmDocument doc, SrmSettings settings, Action<SrmDocument> changeProps = null)
: base(doc.Id, Annotations.EMPTY, doc.Children, false)
{
FormatVersion = doc.FormatVersion;
RevisionIndex = doc.RevisionIndex;
UserRevisionIndex = doc.UserRevisionIndex;
Settings = doc.UpdateHasHeavyModifications(settings);
AuditLog = doc.AuditLog;
DocumentHash = doc.DocumentHash;
DeferSettingsChanges = doc.DeferSettingsChanges;
DocumentType = doc.DocumentType;
if (changeProps != null)
changeProps(this);
}
/// <summary>
/// Notes document contents type: proteomic, small molecule, or mixed (empty reports as proteomic),
/// which allows for quick discrimination between needs for proteomic and small molecule behavior.
/// N.B. For construction time and <see cref="OnChangingChildren"/> only!!! Mustn't break immutabilty contract.
///
/// </summary>
private void SetDocumentType()
{
var hasPeptides = false;
var hasSmallMolecules = false;
foreach (var tg in MoleculeTransitionGroups)
{
hasPeptides |= !tg.IsCustomIon;
hasSmallMolecules |= tg.IsCustomIon;
}
if (hasSmallMolecules && hasPeptides)
{
DocumentType = DOCUMENT_TYPE.mixed;
}
else if (hasSmallMolecules)
{
DocumentType = DOCUMENT_TYPE.small_molecules;
}
else if (hasPeptides)
{
DocumentType = DOCUMENT_TYPE.proteomic;
}
else
{
DocumentType = DOCUMENT_TYPE.none;
}
Settings = UpdateHasHeavyModifications(Settings);
}
public override AnnotationDef.AnnotationTarget AnnotationTarget {
get { throw new InvalidOperationException();}
}
public DocumentFormat FormatVersion { get; private set; }
/// <summary>
/// Monotonically increasing index, incremented each time a modified
/// document is created. Works much like the revision count in Subversion.
/// And the immutable document architecture itself may have its roots in
/// source code control.
///
/// This index is in memory only, and is started at zero each time the
/// document is loaded from disk.
///
/// Also, this index is not included in the document content equality
/// functions, because doing so would break the main use case for document
/// equality: unit testing.
/// </summary>
public int RevisionIndex { get; private set; }
/// <summary>
/// Much like RevisionIndex, only it is incremented each time the user
/// changes the document. i.e. any time an Undo/Redo record is created.
/// </summary>
public int UserRevisionIndex { get; private set; }
/// <summary>
/// Document-wide settings information
/// </summary>
public SrmSettings Settings { get; private set; }
/// <summary>
/// Document hash that gets updated when the document is opened/saved
/// </summary>
public string DocumentHash { get; private set; }
public AuditLogList AuditLog { get; private set; }
public Targets Targets { get { return new Targets(this);} }
public bool DeferSettingsChanges { get; private set; }
/// <summary>
/// Convenience access to the <see cref="MeasuredResults"/> for easier debugging.
/// </summary>
public MeasuredResults MeasuredResults { get { return Settings.MeasuredResults; } }
/// <summary>
/// Node level depths below this node
/// </summary>
// ReSharper disable InconsistentNaming
public enum Level { MoleculeGroups, Molecules, TransitionGroups, Transitions }
// ReSharper restore InconsistentNaming
public int MoleculeGroupCount { get { return GetCount((int)Level.MoleculeGroups); } }
public int MoleculeCount { get { return GetCount((int)Level.Molecules); } }
public int MoleculeTransitionGroupCount { get { return GetCount((int)Level.TransitionGroups); } }
public int MoleculeTransitionCount { get { return GetCount((int)Level.Transitions); } }
// Convenience functions for ignoring non-proteomic (CustomIon) nodes - that is, getting only peptides
public int PeptideGroupCount { get { return PeptideGroups.Count(); } }
public int PeptideCount { get { return Peptides.Count(); } }
public int PeptideTransitionGroupCount { get { return PeptideTransitionGroups.Count(); } }
public int PeptideTransitionCount { get { return PeptideTransitions.Count(); } }
// Convenience functions for ignoring proteomic nodes - that is, getting only custom ions
public int CustomIonCount { get { return CustomMolecules.Count(); } }
/// <summary>
/// Quick access to document type proteomic/small_molecules/mixed, based on the assumption that
/// TransitionGroups are purely proteomic or small molecule, but the document is not.
///
/// Empty documents report as none.
///
/// These enum names are used on persisted settings for UI mode, so don't rename them as
/// it will confuse existing installations.
///
/// </summary>
public enum DOCUMENT_TYPE
{
proteomic,
small_molecules,
mixed,
none // empty documents return this
};
public DOCUMENT_TYPE DocumentType { get; private set; }
public bool IsEmptyOrHasPeptides { get { return DocumentType != DOCUMENT_TYPE.small_molecules; } }
public bool HasPeptides { get { return DocumentType == DOCUMENT_TYPE.proteomic || DocumentType == DOCUMENT_TYPE.mixed; } }
public bool HasSmallMolecules { get { return DocumentType == DOCUMENT_TYPE.small_molecules || DocumentType == DOCUMENT_TYPE.mixed; } }
/// <summary>
/// Return all <see cref="PeptideGroupDocNode"/>s of any kind
/// </summary>
public IEnumerable<PeptideGroupDocNode> MoleculeGroups
{
get
{
return Children.Cast<PeptideGroupDocNode>();
}
}
/// <summary>
/// Return all <see cref="PeptideGroupDocNode"/>s that contain peptides
/// </summary>
public IEnumerable<PeptideGroupDocNode> PeptideGroups
{
get
{
return MoleculeGroups.Where(p => p.IsProteomic);
}
}
/// <summary>
/// Return all <see cref="PeptideDocNode"/> of any kind
/// </summary>
public IEnumerable<PeptideDocNode> Molecules
{
get
{
return MoleculeGroups.SelectMany(node => node.Molecules);
}
}
/// <summary>
/// Return all <see cref="PeptideDocNode"/> that are actual peptides
/// </summary>
public IEnumerable<PeptideDocNode> Peptides
{
get
{
return Molecules.Where(p => !p.Peptide.IsCustomMolecule);
}
}
/// <summary>
/// Return all <see cref="PeptideDocNode"/> that are custom molecules
/// </summary>
public IEnumerable<PeptideDocNode> CustomMolecules
{
get
{
return Molecules.Where(p => p.Peptide.IsCustomMolecule);
}
}
/// <summary>
/// Return all <see cref="TransitionGroupDocNode"/> of any kind
/// </summary>
public IEnumerable<TransitionGroupDocNode> MoleculeTransitionGroups
{
get
{
return Molecules.SelectMany(node => node.Children.Cast<TransitionGroupDocNode>());
}
}
/// <summary>
/// Return all <see cref="TransitionGroupDocNode"/> whose members are peptide precursors
/// </summary>
public IEnumerable<TransitionGroupDocNode> PeptideTransitionGroups
{
get
{
return MoleculeTransitionGroups.Where(t => !t.TransitionGroup.Peptide.IsCustomMolecule);
}
}
public IEnumerable<PeptidePrecursorPair> MoleculePrecursorPairs
{
get
{
return Molecules.SelectMany(
node => node.TransitionGroups.Select(nodeGroup => new PeptidePrecursorPair(node, nodeGroup)));
}
}
public IEnumerable<PeptidePrecursorPair> PeptidePrecursorPairs
{
get
{
return Peptides.SelectMany(
node => node.TransitionGroups.Select(nodeGroup => new PeptidePrecursorPair(node, nodeGroup)));
}
}
public IEnumerable<LibKey> MoleculeLibKeys
{
get
{
return Molecules.SelectMany(
node => node.TransitionGroups.Select(nodeGroup => nodeGroup.GetLibKey(Settings, node)));
}
}
/// <summary>
/// Return a list of <see cref="TransitionDocNode"/> of any kind
/// </summary>
public IEnumerable<TransitionDocNode> MoleculeTransitions
{
get
{
return MoleculeTransitionGroups.SelectMany(node => node.Children.Cast<TransitionDocNode>());
}
}
/// <summary>
/// Return a list of <see cref="TransitionDocNode"/> that are in peptides
/// </summary>
public IEnumerable<TransitionDocNode> PeptideTransitions
{
get
{
return MoleculeTransitions.Where(t => !t.Transition.Group.IsCustomIon);
}
}
public HashSet<Target> GetRetentionTimeStandards()
{
try
{
return GetRetentionTimeStandardsOrThrow();
}
catch (Exception)
{
return new HashSet<Target>();
}
}
public bool HasAllRetentionTimeStandards()
{
try
{
GetRetentionTimeStandardsOrThrow();
return true;
}
catch (Exception)
{
return false;
}
}
private HashSet<Target> GetRetentionTimeStandardsOrThrow()
{
var rtRegression = Settings.PeptideSettings.Prediction.RetentionTime;
if (rtRegression == null || rtRegression.Calculator == null)
return new HashSet<Target>();
var regressionPeps = rtRegression.Calculator.GetStandardPeptides(Peptides.Select(
nodePep => Settings.GetModifiedSequence(nodePep)));
return new HashSet<Target>(regressionPeps);
}
/// <summary>
/// True when any PeptideGroupDocNodes lack complete protein metadata
/// </summary>
public bool IsProteinMetadataPending { get; private set; }
/// <summary>
/// True if the parts of a Skyline document affected by a Save As command are loaded
/// </summary>
public bool IsSavable
{
    get
    {
        // Results cache file must be fully created before a Save As, since it has
        // the same base name as the document
        if (Settings.HasResults && !Settings.MeasuredResults.IsLoaded)
            return false;
        // Document libraries also have the same base name as the document and must be copied
        if (Settings.HasLibraries && Settings.HasDocumentLibrary && !Settings.PeptideSettings.Libraries.IsLoaded)
            return false;
        return true;
    }
}
/// <summary>
/// Returns non-localized strings describing any unloadedness in the document
/// TODO: there are still issues with this, like errors importing results
/// </summary>
public IEnumerable<string> NonLoadedStateDescriptions
{
    get
    {
        if (Settings.HasResults)
        {
            var resultsWhyNot = Settings.MeasuredResults.IsNotLoadedExplained;
            if (resultsWhyNot != null)
                yield return @"Settings.MeasuredResults " + resultsWhyNot;
        }
        if (Settings.HasLibraries)
        {
            var librariesWhyNot = Settings.PeptideSettings.Libraries.IsNotLoadedExplained;
            if (librariesWhyNot != null)
                yield return @"Settings.PeptideSettings.Libraries: " + librariesWhyNot;
        }
        var whyNot = IrtDbManager.IsNotLoadedDocumentExplained(this);
        if (whyNot != null)
            yield return whyNot;
        whyNot = OptimizationDbManager.IsNotLoadedDocumentExplained(this);
        if (whyNot != null)
            yield return whyNot;
        whyNot = DocumentRetentionTimes.IsNotLoadedExplained(Settings);
        if (whyNot != null)
            yield return whyNot;
        whyNot = IonMobilityLibraryManager.IsNotLoadedDocumentExplained(this);
        if (whyNot != null)
            yield return whyNot;
        // BackgroundProteome?
    }
}
/// <summary>
/// Like <see cref="NonLoadedStateDescriptions"/>, but also reports an
/// unloaded background proteome.
/// </summary>
public IEnumerable<string> NonLoadedStateDescriptionsFull
{
    get
    {
        foreach (var description in NonLoadedStateDescriptions)
            yield return description;
        var pepSet = Settings.PeptideSettings;
        var proteomeWhyNot = BackgroundProteomeManager.IsNotLoadedExplained(pepSet, pepSet.BackgroundProteome, true);
        if (proteomeWhyNot != null)
            yield return proteomeWhyNot;
    }
}
/// <summary>
/// True if all parts of the document loaded by background loaders have completed loading
/// TODO: there are still issues with this, like errors importing results
/// </summary>
public bool IsLoaded
{
    get
    {
        // Loaded when there is nothing left to explain as not loaded
        foreach (var unused in NonLoadedStateDescriptions)
            return false;
        return true;
    }
}
/// <summary>
/// Finds the peptide group whose sequence matches the given FASTA sequence,
/// or null when no group matches.
/// </summary>
public PeptideGroupDocNode FindPeptideGroup(PeptideGroup fastaSequence)
{
    return PeptideGroups.FirstOrDefault(
        nodeGroup => nodeGroup.PeptideGroup.Sequence == fastaSequence.Sequence);
}
/// <summary>
/// Identity path to the deepest last child in the document tree, following
/// the last child at each level until a leaf (or childless parent) is reached.
/// </summary>
public IdentityPath LastNodePath
{
    get
    {
        var path = IdentityPath.ROOT;
        DocNodeParent nodeParent = this;
        while (nodeParent != null && nodeParent.Children.Count > 0)
        {
            var lastChild = nodeParent.Children[nodeParent.Children.Count - 1];
            path = new IdentityPath(path, lastChild.Id);
            nodeParent = lastChild as DocNodeParent;
        }
        return path;
    }
}
// Immutable "change" helpers: each clones the document and replaces one property.

/// <summary>
/// Returns a copy of this document with its document hash replaced.
/// </summary>
public SrmDocument ChangeDocumentHash(string hash)
{
    return ChangeProp(ImClone(this), im => im.DocumentHash = hash);
}

/// <summary>
/// Returns a copy of this document with its audit log replaced.
/// </summary>
public SrmDocument ChangeAuditLog(AuditLogList log)
{
    return ChangeProp(ImClone(this), im => im.AuditLog = log);
}

/// <summary>
/// Convenience overload wrapping a single entry chain in an <see cref="AuditLogList"/>.
/// </summary>
public SrmDocument ChangeAuditLog(AuditLogEntry entries)
{
    return ChangeAuditLog(new AuditLogList(entries));
}
/// <summary>
/// Appends the smallest positive integer to <paramref name="baseId"/> that
/// produces a name not already used by any molecule group in the document.
/// </summary>
private string GetMoleculeGroupId(string baseId)
{
    var existingNames = new HashSet<string>();
    foreach (PeptideGroupDocNode nodeGroup in Children)
        existingNames.Add(nodeGroup.Name);
    int suffix = 1;
    while (existingNames.Contains(baseId + suffix))
        suffix++;
    return baseId + suffix;
}
/// <summary>
/// Generates a unique default name for a new small molecule group.
/// </summary>
public string GetSmallMoleculeGroupId()
{
    return GetMoleculeGroupId(Resources.SrmDocument_GetSmallMoleculeGroupId_molecules);
}

/// <summary>
/// Generates a unique default name for a new peptide group, using the
/// "peptides" base name for peptide lists and "sequence" otherwise.
/// </summary>
public string GetPeptideGroupId(bool peptideList)
{
    if (peptideList)
        return GetMoleculeGroupId(Resources.SrmDocument_GetPeptideGroupId_peptides);
    return GetMoleculeGroupId(Resources.SrmDocument_GetPeptideGroupId_sequence);
}
/// <summary>
/// True when every molecule in the document can be triggered for the given replicate.
/// </summary>
public bool CanTrigger(int? replicateIndex)
{
    foreach (var nodeMol in Molecules)
    {
        if (!nodeMol.CanTrigger(replicateIndex))
            return false;
    }
    return true;
}

/// <summary>
/// True when the document contains both negative and positive precursor charges.
/// </summary>
public bool IsMixedPolarity()
{
    // Short-circuit: only scan for positives when a negative was found
    if (!MoleculeTransitionGroups.Any(nodeGroup => nodeGroup.TransitionGroup.PrecursorCharge < 0))
        return false;
    return MoleculeTransitionGroups.Any(nodeGroup => nodeGroup.TransitionGroup.PrecursorCharge > 0);
}
/// <summary>
/// True when the prediction settings can schedule this document with the
/// requested windowing strategy.
/// </summary>
public bool CanSchedule(bool singleWindow)
{
    var strategy = singleWindow
        ? PeptidePrediction.SchedulingStrategy.single_window
        : PeptidePrediction.SchedulingStrategy.all_variable_window;
    return Settings.PeptideSettings.Prediction.CanSchedule(this, strategy);
}

/// <summary>
/// True when any peptide group still needs a protein metadata search.
/// </summary>
private bool CalcIsProteinMetadataPending()
{
    // Non proteomic molecules never do protein metadata searches
    return PeptideGroups.Any(pg => pg.ProteinMetadata.NeedsSearch());
}
/// <summary>
/// Returns a copy of this document with its user revision index advanced by one.
/// </summary>
public SrmDocument IncrementUserRevisionIndex()
{
    return ChangeProp(ImClone(this), im => im.UserRevisionIndex++);
}
/// <summary>
/// Make sure every new copy of a document gets an incremented value
/// for <see cref="RevisionIndex"/>.
/// </summary>
/// <param name="clone">The new copy of the document</param>
/// <param name="indexReplaced">Index to a single replaced node, if that is why the children are changing</param>
/// <returns>The children for the cloned document, with result summaries updated when necessary</returns>
protected override IList<DocNode> OnChangingChildren(DocNodeParent clone, int indexReplaced)
{
    if (ReferenceEquals(clone, this))
        return Children;
    SrmDocument docClone = (SrmDocument)clone;
    docClone.RevisionIndex = RevisionIndex + 1;
    // Make sure peptide standards lists are up to date
    docClone.Settings = docClone.Settings.CachePeptideStandards(Children, docClone.Children);
    // Note protein metadata readiness
    docClone.IsProteinMetadataPending = docClone.CalcIsProteinMetadataPending();
    // If iRT standards have changed, reset auto-calculated conversion to make sure they are
    // updated on a background thread
    if (!ReferenceEquals(Settings.GetPeptideStandards(StandardType.IRT),
            docClone.Settings.GetPeptideStandards(StandardType.IRT)) &&
        docClone.Settings.PeptideSettings.Prediction.RetentionTime != null)
    {
        docClone.Settings = docClone.Settings.ChangePeptidePrediction(p =>
            p.ChangeRetentionTime(p.RetentionTime.ForceRecalculate()));
    }
    // Note document contents type: proteomic, small molecule, or mixed (empty reports as proteomic)
    if (!DeferSettingsChanges)
    {
        docClone.SetDocumentType();
    }
    // If this document has associated results, update the results
    // for any peptides that have changed.
    if (!Settings.HasResults || DeferSettingsChanges)
        return docClone.Children;
    // Store indexes to previous results in a dictionary for lookup
    var dictPeptideIdPeptide = new Dictionary<int, PeptideDocNode>();
    // Unless the normalization standards have changed, which require recalculating of all ratios
    if (ReferenceEquals(Settings.GetPeptideStandards(StandardType.GLOBAL_STANDARD),
        docClone.Settings.GetPeptideStandards(StandardType.GLOBAL_STANDARD)))
    {
        foreach (var nodePeptide in Molecules)
        {
            if (nodePeptide != null) // Or previous peptides were freed during command-line peak picking
                dictPeptideIdPeptide.Add(nodePeptide.Peptide.GlobalIndex, nodePeptide);
        }
    }
    // An empty dictionary forces every molecule to be rebuilt in UpdateResultsSummaries
    return docClone.UpdateResultsSummaries(docClone.Children, dictPeptideIdPeptide);
}
/// <summary>
/// Update results for the changed peptides. This needs to start
/// at the peptide level, because peptides have useful peak picking information
/// like predicted retention time, and multiple measured precursors.
/// </summary>
/// <param name="children">Peptide group nodes whose molecules may need result summary updates</param>
/// <param name="dictPeptideIdPeptide">Previous peptide nodes keyed by global index; a
/// reference-equal match means the node is unchanged and is reused without recalculation</param>
private IList<DocNode> UpdateResultsSummaries(IList<DocNode> children, IDictionary<int, PeptideDocNode> dictPeptideIdPeptide)
{
    // Perform main processing for peptides in parallel
    var diffResults = new SrmSettingsDiff(Settings, true);
    var moleculeGroupPairs = GetMoleculeGroupPairs(children);
    var moleculeNodes = new PeptideDocNode[moleculeGroupPairs.Length];
    ParallelEx.For(0, moleculeGroupPairs.Length, i =>
    {
        var pair = moleculeGroupPairs[i];
        var nodePep = pair.NodeMolecule;
        int index = nodePep.Peptide.GlobalIndex;
        PeptideDocNode nodeExisting;
        // Reuse the node unchanged when it is reference-equal to the previous version
        if (dictPeptideIdPeptide.TryGetValue(index, out nodeExisting) &&
            ReferenceEquals(nodeExisting, nodePep))
            moleculeNodes[i] = nodePep;
        else
            moleculeNodes[i] = nodePep.ChangeSettings(Settings, diffResults);
    });
    return RegroupMolecules(children, moleculeNodes);
}
/// <summary>
/// Returns a flat list of <see cref="MoleculeGroupPair"/> in the document for use in parallel
/// processing of all molecules in the document.
/// </summary>
public MoleculeGroupPair[] GetMoleculeGroupPairs()
{
    // MoleculeCount is already known for the whole document, avoiding a recount
    return GetMoleculeGroupPairs(Children, MoleculeCount);
}

/// <summary>
/// Returns a flat list of <see cref="MoleculeGroupPair"/>, given a list of <see cref="PeptideGroupDocNode"/>
/// children, for use in parallel processing of all molecules in the children.
/// </summary>
private static MoleculeGroupPair[] GetMoleculeGroupPairs(IList<DocNode> children)
{
    // Count the molecules first so the pair array can be allocated exactly
    return GetMoleculeGroupPairs(children, children.Cast<PeptideGroupDocNode>().Sum(g => g.MoleculeCount));
}
/// <summary>
/// Returns a flat list of <see cref="MoleculeGroupPair"/>, given a list of <see cref="PeptideGroupDocNode"/>
/// children and the total number of molecules they contain, for use in parallel processing of all molecules
/// in the children.
/// </summary>
private static MoleculeGroupPair[] GetMoleculeGroupPairs(IList<DocNode> children, int moleculeCount)
{
    var pairs = new MoleculeGroupPair[moleculeCount];
    int iPair = 0;
    foreach (PeptideGroupDocNode nodeGroup in children)
    {
        foreach (var nodeMolecule in nodeGroup.Molecules)
            pairs[iPair++] = new MoleculeGroupPair(nodeGroup, nodeMolecule);
    }
    return pairs;
}
/// <summary>
/// Regroup a flat list of molecules produced by iterating over the results of
/// <see cref="GetMoleculeGroupPairs"/>
/// </summary>
/// <param name="children">A starting children of <see cref="PeptideGroupDocNode"/> objects</param>
/// <param name="moleculeNodes">A flat list of <see cref="PeptideDocNode"/> objects</param>
/// <param name="rankChildren">Function to rank peptides in their final list</param>
/// <returns>A list of <see cref="PeptideGroupDocNode"/> objects with the original structure and the new <see cref="PeptideDocNode"/> objects</returns>
private static IList<DocNode> RegroupMolecules(IList<DocNode> children, PeptideDocNode[] moleculeNodes,
    Func<PeptideGroupDocNode, IList<DocNode>, IList<DocNode>> rankChildren = null)
{
    var newMoleculeGroups = new DocNode[children.Count];
    // moleculeNodeIndex advances through the flat list in the same order the
    // pairs were produced, so each group consumes exactly its own molecules
    int moleculeNodeIndex = 0;
    for (int i = 0; i < newMoleculeGroups.Length; i++)
    {
        var nodeGroup = (PeptideGroupDocNode)children[i];
        IList<DocNode> newChildren = new DocNode[nodeGroup.Children.Count];
        for (int childIndex = 0; childIndex < newChildren.Count; childIndex++)
        {
            newChildren[childIndex] = moleculeNodes[moleculeNodeIndex++];
        }
        if (rankChildren != null)
            newChildren = rankChildren(nodeGroup, newChildren);
        newMoleculeGroups[i] = nodeGroup.ChangeChildrenChecked(newChildren);
    }
    // Preserve reference equality when nothing actually changed
    if (ArrayUtil.ReferencesEqual(children, newMoleculeGroups))
        return children;
    return newMoleculeGroups;
}
/// <summary>
/// Struct that pairs <see cref="PeptideGroupDocNode"/> with <see cref="PeptideDocNode"/> for
/// use in a flat list that enables parallel processing.
/// </summary>
public struct MoleculeGroupPair
{
    public MoleculeGroupPair(PeptideGroupDocNode nodeMoleculeGroup, PeptideDocNode nodeMolecule)
        : this()
    {
        NodeMoleculeGroup = nodeMoleculeGroup;
        NodeMolecule = nodeMolecule;
    }

    // The group that owns the molecule
    public PeptideGroupDocNode NodeMoleculeGroup { get; private set; }
    // The molecule itself; null after ReleaseMolecule() has been called
    public PeptideDocNode NodeMolecule { get; private set; }

    /// <summary>
    /// Returns the molecule and clears this pair's reference to it, so the pair
    /// no longer keeps the node reachable (used when freeing memory during
    /// processing — see the FreeImmutableMemory path in ChangeSettingsInternal).
    /// </summary>
    public PeptideDocNode ReleaseMolecule()
    {
        var nodeMol = NodeMolecule;
        NodeMolecule = null;
        return nodeMol;
    }
}
/// <summary>
/// Creates a cloned instance of the document with a new <see cref="Settings"/>
/// value, updating the <see cref="DocNode"/> hierarchy to reflect the change.
/// Any <see cref="MeasuredResults"/> on the incoming settings is discarded in
/// favor of the document's current results; use <see cref="ChangeMeasuredResults"/>
/// to change results.
/// </summary>
/// <param name="settingsNew">New settings value</param>
/// <param name="progressMonitor">Progress monitor for long settings change operations</param>
/// <returns>A new document revision</returns>
public SrmDocument ChangeSettings(SrmSettings settingsNew, SrmSettingsChangeMonitor progressMonitor = null)
{
    // Preserve measured results. Call ChangeMeasureResults to change the
    // MeasuredResults property on the SrmSettings.
    if (!ReferenceEquals(Settings.MeasuredResults, settingsNew.MeasuredResults))
        settingsNew = settingsNew.ChangeMeasuredResults(Settings.MeasuredResults);
    return ChangeSettingsInternal(settingsNew, progressMonitor);
}
/// <summary>
/// Creates a cloned instance of the document with a new <see cref="Settings"/>
/// value, without updating the <see cref="DocNode"/> hierarchy to reflect the change.
/// </summary>
/// <param name="settingsNew">New settings value</param>
/// <returns>A new document revision</returns>
public SrmDocument ChangeSettingsNoDiff(SrmSettings settingsNew)
{
    return new SrmDocument(this, settingsNew, doc =>
    {
        // Even without a node diff the revision advances and metadata
        // readiness is recomputed for the new settings
        doc.RevisionIndex++;
        doc.IsProteinMetadataPending = doc.CalcIsProteinMetadataPending();
    });
}
/// <summary>
/// Creates a cloned instance of the document with a new <see cref="Settings"/>
/// value, which is itself a clone of the previous settings with a new
/// <see cref="MeasuredResults"/> value.
/// </summary>
/// <param name="results">New <see cref="MeasuredResults"/> instance to associate with this document</param>
/// <param name="progressMonitor">Progress monitor for long settings change operations</param>
/// <returns>A new document revision</returns>
public SrmDocument ChangeMeasuredResults(MeasuredResults results, SrmSettingsChangeMonitor progressMonitor = null)
{
    return ChangeSettingsInternal(Settings.ChangeMeasuredResults(results), progressMonitor);
}
/// <summary>
/// Creates a cloned instance of the document with a new <see cref="Settings"/>
/// value. Unlike <see cref="ChangeSettings"/>, this accepts whatever
/// <see cref="MeasuredResults"/> the new settings carry.
/// </summary>
/// <param name="settingsNew">New settings value</param>
/// <param name="progressMonitor">Progress monitor for long settings change operations</param>
/// <returns>A new document revision</returns>
private SrmDocument ChangeSettingsInternal(SrmSettings settingsNew, SrmSettingsChangeMonitor progressMonitor = null)
{
    settingsNew = UpdateHasHeavyModifications(settingsNew);
    // First figure out what changed.
    SrmSettingsDiff diff = new SrmSettingsDiff(Settings, settingsNew);
    if (progressMonitor != null)
    {
        progressMonitor.GroupCount = MoleculeGroupCount;
        if (!diff.DiffPeptides)
            progressMonitor.MoleculeCount = MoleculeCount;
        diff.Monitor = progressMonitor;
    }
    // If there were no changes that require DocNode tree updates
    if (DeferSettingsChanges || !diff.RequiresDocNodeUpdate)
        return ChangeSettingsNoDiff(settingsNew);
    else
    {
        IList<DocNode> childrenNew;
        if (diff.DiffPeptides)
        {
            // Changes on peptides need to be done on the peptide groups, which
            // may not achieve that great parallelism, if there is a very large
            // peptide group, like Decoys
            var childrenParallel = new DocNode[Children.Count];
            var settingsParallel = settingsNew;
            int currentPeptide = 0;
            int totalPeptides = Children.Count;
            // If we are looking at peptide uniqueness against a background proteome,
            // it's faster to do those checks with a comprehensive list of peptides of
            // potential interest rather than taking them one by one.
            // So we'll precalculate the peptides using any other filter settings
            // before we go on to apply the uniqueness check.
            var uniquenessPrecheckChildren = new List<PeptideDocNode>[Children.Count];
            Dictionary<Target, bool> uniquenessDict = null;
            if (settingsNew.PeptideSettings.Filter.PeptideUniqueness != PeptideFilter.PeptideUniquenessConstraint.none &&
                !settingsNew.PeptideSettings.NeedsBackgroundProteomeUniquenessCheckProcessing)
            {
                // Generate the peptide docnodes with no uniqueness filter
                var settingsNoUniquenessFilter =
                    settingsNew.ChangePeptideSettings(
                        settingsNew.PeptideSettings.ChangeFilter(
                            settingsNew.PeptideSettings.Filter.ChangePeptideUniqueness(
                                PeptideFilter.PeptideUniquenessConstraint.none)));
                uniquenessPrecheckChildren = new List<PeptideDocNode>[Children.Count];
                totalPeptides *= 2; // We have to run the list twice
                ParallelEx.For(0, Children.Count, i =>
                {
                    if (progressMonitor != null)
                    {
                        var percentComplete = ProgressStatus.ThreadsafeIncementPercent(ref currentPeptide, totalPeptides);
                        if (percentComplete.HasValue && percentComplete.Value < 100)
                            progressMonitor.ChangeProgress(status => status.ChangePercentComplete(percentComplete.Value));
                    }
                    var nodeGroup = (PeptideGroupDocNode)Children[i];
                    uniquenessPrecheckChildren[i] = nodeGroup.GetPeptideNodes(settingsNoUniquenessFilter, true).ToList();
                });
                var uniquenessPrecheckPeptidesOfInterest = new List<Target>(uniquenessPrecheckChildren.SelectMany(u => u.Select(p => p.Peptide.Target)));
                // Update cache for uniqueness checks against the background proteome while we have worker threads available
                uniquenessDict = settingsNew.PeptideSettings.Filter.CheckPeptideUniqueness(settingsNew, uniquenessPrecheckPeptidesOfInterest, progressMonitor);
            }
            // Now perform or complete the peptide selection
            ParallelEx.For(0, Children.Count, i =>
            {
                if (progressMonitor != null)
                {
                    if (progressMonitor.IsCanceled())
                        throw new OperationCanceledException();
                    var percentComplete = ProgressStatus.ThreadsafeIncementPercent(ref currentPeptide, totalPeptides);
                    if (percentComplete.HasValue && percentComplete.Value < 100)
                        progressMonitor.ChangeProgress(status => status.ChangePercentComplete(percentComplete.Value));
                }
                var nodeGroup = (PeptideGroupDocNode)Children[i];
                childrenParallel[i] = nodeGroup.ChangeSettings(settingsParallel, diff,
                    new DocumentSettingsContext(uniquenessPrecheckChildren[i], uniquenessDict));
            });
            childrenNew = childrenParallel;
        }
        else
        {
            // Changes that do not change the peptides can be done quicker with
            // parallel enumeration of the peptides
            var moleculeGroupPairs = GetMoleculeGroupPairs(Children);
            var resultsHandler = settingsNew.PeptideSettings.Integration.ResultsHandler;
            if (resultsHandler != null && resultsHandler.FreeImmutableMemory)
            {
                // Break immutability (command-line only!) and release the peptides (children of the children)
                // so that their memory is freed after they have been processed
                foreach (DocNodeParent child in Children)
                    child.ReleaseChildren();
            }
            var moleculeNodes = new PeptideDocNode[moleculeGroupPairs.Length];
            var settingsParallel = settingsNew;
            int currentMoleculeGroupPair = 0;
            ParallelEx.For(0, moleculeGroupPairs.Length, i =>
            {
                if (progressMonitor != null)
                {
                    if (progressMonitor.IsCanceled())
                        throw new OperationCanceledException();
                    var percentComplete = ProgressStatus.ThreadsafeIncementPercent(ref currentMoleculeGroupPair, moleculeGroupPairs.Length);
                    if (percentComplete.HasValue && percentComplete.Value < 100)
                        progressMonitor.ChangeProgress(status => status.ChangePercentComplete(percentComplete.Value));
                }
                var nodePep = moleculeGroupPairs[i].ReleaseMolecule();
                moleculeNodes[i] = nodePep.ChangeSettings(settingsParallel, diff);
            });
            childrenNew = RegroupMolecules(Children, moleculeNodes,
                (nodeGroup, children) => nodeGroup.RankChildren(settingsParallel, children));
        }
        // Results handler changes for re-integration last only long enough
        // to change the children
        if (settingsNew.PeptideSettings.Integration.ResultsHandler != null)
            settingsNew = settingsNew.ChangePeptideIntegration(i => i.ChangeResultsHandler(null));
        // Don't change the children, if the resulting list contains
        // only reference equal children of the same length and in the
        // same order.
        if (ArrayUtil.ReferencesEqual(childrenNew, Children))
            return ChangeSettingsNoDiff(settingsNew);
        return (SrmDocument)new SrmDocument(this, settingsNew).ChangeChildren(childrenNew);
    }
}
/// <summary>
/// Ensures the settings' HasHeavyModifications flag reflects the document:
/// true when any heavy modification is declared, or when a small molecule
/// carries a non-light isotope label type. Returns the settings unchanged
/// when the flag is already correct.
/// </summary>
private SrmSettings UpdateHasHeavyModifications(SrmSettings settings)
{
    var mods = settings.PeptideSettings.Modifications;
    bool hasHeavy = mods.GetHeavyModifications().Any(m => m.Modifications.Count > 0);
    if (!hasHeavy && HasSmallMolecules)
    {
        // Small molecules can use heavy label types without declared modifications
        hasHeavy = Molecules.Any(molecule =>
            molecule.TransitionGroups.Any(group =>
                !ReferenceEquals(group.TransitionGroup.LabelType, IsotopeLabelType.light)));
    }
    if (hasHeavy == mods.HasHeavyModifications)
        return settings;
    return settings.ChangePeptideSettings(settings.PeptideSettings.ChangeModifications(
        settings.PeptideSettings.Modifications.ChangeHasHeavyModifications(hasHeavy)));
}
/// <summary>
/// Imports another Skyline document from XML into this one: merges measured
/// results and library specs, reconciles modifications, and adds the imported
/// peptide groups at the requested location.
/// </summary>
/// <param name="reader">Reader positioned at the XML of the document to import</param>
/// <param name="filePath">Path of the imported file, used when merging results</param>
/// <param name="resultsAction">How to merge measured results from the imported document</param>
/// <param name="mergePeptides">True to merge user info of matching peptides instead of duplicating them</param>
/// <param name="findLibrary">Callback used to locate missing library files</param>
/// <param name="staticMods">Static modification lists to extend with imported mods</param>
/// <param name="heavyMods">Heavy modification lists to extend with imported mods</param>
/// <param name="to">Insertion location for the imported groups</param>
/// <param name="firstAdded">Set to the path of the first node added</param>
/// <param name="nextAdd">Set to the path at which a subsequent add should occur</param>
/// <param name="pasteToPeptideList">True to flatten imported molecules into a single peptide list</param>
public SrmDocument ImportDocumentXml(TextReader reader,
    string filePath,
    MeasuredResults.MergeAction resultsAction,
    bool mergePeptides,
    PeptideLibraries.FindLibrary findLibrary,
    MappedList<string, StaticMod> staticMods,
    MappedList<string, StaticMod> heavyMods,
    IdentityPath to,
    out IdentityPath firstAdded,
    out IdentityPath nextAdd,
    bool pasteToPeptideList)
{
    try
    {
        // Serialization context must be set so deserialized peptides can resolve modifications
        PeptideModifications.SetSerializationContext(Settings.PeptideSettings.Modifications);

        XmlSerializer ser = new XmlSerializer(typeof(SrmDocument));
        SrmDocument docImport = (SrmDocument) ser.Deserialize(reader);

        // Add import modifications to default modifications.
        docImport.Settings.UpdateDefaultModifications(false);

        var docNew = this;
        var settingsNew = docNew.Settings;
        var settingsOld = docImport.Settings;
        if (settingsOld.MeasuredResults != null)
            settingsOld = settingsOld.ChangeMeasuredResults(settingsOld.MeasuredResults.ClearDeserialized());

        // Merge results from import document with current document.
        MeasuredResults resultsBase;
        MeasuredResults resultsNew = MeasuredResults.MergeResults(settingsNew.MeasuredResults,
            settingsOld.MeasuredResults, filePath, resultsAction, out resultsBase);

        if (!ReferenceEquals(resultsNew, settingsNew.MeasuredResults))
            settingsNew = settingsNew.ChangeMeasuredResults(resultsNew);
        if (!ReferenceEquals(resultsBase, settingsOld.MeasuredResults))
            settingsOld = settingsOld.ChangeMeasuredResults(resultsBase);

        // Merge library specs from import document with current document.
        settingsNew = settingsNew.ChangePeptideLibraries(lib =>
            lib.MergeLibrarySpecs(docImport.Settings.PeptideSettings.Libraries, findLibrary));

        if(!Equals(settingsNew, docNew.Settings))
        {
            // Use internal settings change to preserve any changes to the measured results
            docNew = docNew.ChangeSettingsInternal(settingsNew);
        }

        var settingsDiff = new SrmSettingsDiff(settingsOld, settingsNew, true);

        IList<PeptideGroupDocNode> peptideGroups = docImport.MoleculeGroups.ToList();
        if (pasteToPeptideList)
        {
            // Flatten all imported molecules into one new group
            PeptideGroupDocNode peptideGroupDocNode = new PeptideGroupDocNode(new PeptideGroup(), null, null, new PeptideDocNode[0]);
            IList<DocNode> peptides = docImport.Molecules.Cast<DocNode>().ToList();
            peptideGroupDocNode = (PeptideGroupDocNode) peptideGroupDocNode.ChangeChildren(peptides);
            peptideGroups = new List<PeptideGroupDocNode> {peptideGroupDocNode};
        }

        // Create new explicit modifications for peptides and set auto-manage children
        // when necessary for nodes pasted in from the clipboard.
        IList<PeptideGroupDocNode> peptideGroupsNew = new List<PeptideGroupDocNode>();
        foreach (PeptideGroupDocNode nodePepGroup in peptideGroups)
        {
            // Set explicit modifications first, since it may impact which
            // children will be present.
            IList<DocNode> peptidesNew = new List<DocNode>();
            foreach (PeptideDocNode nodePep in nodePepGroup.Children)
            {
                PeptideDocNode nodePepModified = nodePep.EnsureMods(
                    docImport.Settings.PeptideSettings.Modifications,
                    // ReSharper disable once PossibleNullReferenceException
                    docNew.Settings.PeptideSettings.Modifications,
                    staticMods, heavyMods);
                if (nodePepModified.GlobalStandardType != null)
                {
                    // Try to keep settings change from changing the children of standards being imported
                    nodePepModified = (PeptideDocNode)nodePepModified.ChangeAutoManageChildren(false)
                        .ChangeChildrenChecked(nodePepModified.TransitionGroups.Select(nodeGroup => nodeGroup.ChangeAutoManageChildren(false)).ToArray());
                }
                peptidesNew.Add(nodePepModified);
            }
            var nodePepGroupNew = (PeptideGroupDocNode)nodePepGroup.ChangeChildrenChecked(peptidesNew.ToArray());
            nodePepGroupNew = nodePepGroupNew.EnsureChildren(docNew.Settings, pasteToPeptideList);
            // Change settings to update everything in the peptide group to the settings of the
            // new document, including results and peak integration
            nodePepGroupNew = nodePepGroupNew.ChangeSettings(docNew.Settings, settingsDiff);
            peptideGroupsNew.Add(nodePepGroupNew);
        }
        if (mergePeptides)
            docNew = docNew.MergeMatchingPeptidesUserInfo(peptideGroupsNew);
        docNew = docNew.AddPeptideGroups(peptideGroupsNew, pasteToPeptideList, to, out firstAdded, out nextAdd);
        var modsNew = docNew.Settings.PeptideSettings.Modifications.DeclareExplicitMods(docNew,
            staticMods, heavyMods);
        if (!ReferenceEquals(modsNew, docNew.Settings.PeptideSettings.Modifications))
            docNew = docNew.ChangeSettings(docNew.Settings.ChangePeptideModifications(mods => modsNew));
        return docNew;
    }
    finally
    {
        // Always clear the serialization context, even when deserialization throws
        PeptideModifications.SetSerializationContext(null);
    }
}
/// <summary>
/// Inspect a file to see if it's Skyline XML data or not
/// </summary>
/// <param name="path">file to inspect</param>
/// <param name="explained">explanation of problem with file if it's not a Skyline document</param>
/// <returns>true iff file exists and has XML header that appears to be the start of Skyline document.</returns>
public static bool IsSkylineFile(string path, out string explained)
{
    explained = string.Empty;
    if (File.Exists(path))
    {
        try
        {
            // We have no idea what kind of file this might be, so even reading the first "line" might take a long time. Read a chunk instead.
            const int CHUNKSIZE = 500; // Should be more than adequate to check for "?xml version="1.0" encoding="utf-8"?>< srm_settings format_version = "4.12" software_version = "Skyline (64-bit) " >"
            var probeBuf = new byte[CHUNKSIZE];
            int bytesRead;
            // Use "using" so the file handle is released immediately instead of
            // leaking until garbage collection (previously the stream was never disposed)
            using (var probeFile = File.OpenRead(path))
            {
                bytesRead = probeFile.Read(probeBuf, 0, CHUNKSIZE);
            }
            // Decode only the bytes actually read - the file may be shorter than
            // the probe chunk, and Read is not guaranteed to fill the buffer
            var probeString = Encoding.UTF8.GetString(probeBuf, 0, bytesRead);
            if (!probeString.Contains(@"<srm_settings"))
            {
                explained = string.Format(
                    Resources.SkylineWindow_OpenFile_The_file_you_are_trying_to_open____0____does_not_appear_to_be_a_Skyline_document__Skyline_documents_normally_have_a___1___or___2___filename_extension_and_are_in_XML_format_,
                    path, EXT, SrmDocumentSharing.EXT_SKY_ZIP);
            }
        }
        catch (Exception e)
        {
            explained = e.Message;
        }
    }
    else
    {
        explained = Resources.ToolDescription_RunTool_File_not_found_; // "File not found"
    }
    return string.IsNullOrEmpty(explained);
}
/// <summary>
/// Tries to find a .sky file for a .skyd or .skyl etc file
/// </summary>
/// <param name="path">Path to file which may have a sibling .sky file</param>
/// <returns>Input path with extension changed to .sky, if such a file exists and appears to be a Skyline file</returns>
public static string FindSiblingSkylineFile(string path)
{
    var extStart = path.LastIndexOf(EXT, StringComparison.Ordinal);
    // Only paths of the form "<name>.sky?" (exactly one trailing character) qualify
    if (extStart <= 0 || extStart != path.Length - (EXT.Length + 1))
        return path;
    // Looks like user picked a .skyd or .skyl etc
    var candidatePath = path.Substring(0, extStart + EXT.Length);
    if (File.Exists(candidatePath) && IsSkylineFile(candidatePath, out _))
        return candidatePath;
    return path;
}
/// <summary>
/// Merges user-modified information from incoming peptides into matching
/// peptides already in the document, and removes the merged peptides from
/// <paramref name="peptideGroupsNew"/> (which is mutated in place) so they
/// are not added twice. Throws when the incoming list contains the same
/// user-modified peptide more than once.
/// </summary>
private SrmDocument MergeMatchingPeptidesUserInfo(IList<PeptideGroupDocNode> peptideGroupsNew)
{
    // All incoming peptide keys, plus a lookup of the user-modified ones
    var setMerge = new HashSet<PeptideModKey>();
    var dictPeptidesModified = new Dictionary<PeptideModKey, PeptideDocNode>();
    foreach(var nodePep in peptideGroupsNew.SelectMany(nodePepGroup => nodePepGroup.Children)
        .Cast<PeptideDocNode>())
    {
        var key = nodePep.Key;
        setMerge.Add(key);
        if (!nodePep.IsUserModified)
            continue;
        if (dictPeptidesModified.ContainsKey(key))
        {
            throw new InvalidDataException(
                string.Format(Resources.SrmDocument_MergeMatchingPeptidesUserInfo_The_peptide__0__was_found_multiple_times_with_user_modifications,
                    nodePep.RawTextIdDisplay));
        }
        dictPeptidesModified.Add(key, nodePep);
    }

    var diff = new SrmSettingsDiff(Settings, true);
    var setMerged = new HashSet<PeptideModKey>();
    var listPeptideGroupsMerged = new List<DocNode>();
    foreach (var nodePepGroup in MoleculeGroups)
    {
        var listPeptidesMerged = new List<DocNode>();
        foreach (PeptideDocNode nodePep in nodePepGroup.Children)
        {
            // If the peptide has no match in the set to be merged, then just add it.
            if (!setMerge.Contains(nodePep.Key))
                listPeptidesMerged.Add(nodePep);
            else
            {
                // Keep track of the matching peptides
                setMerged.Add(nodePep.Key);

                PeptideDocNode nodePepMatch;
                // If it is not modified, it doesn't really need to be merged.
                if (!dictPeptidesModified.TryGetValue(nodePep.Key, out nodePepMatch))
                    listPeptidesMerged.Add(nodePep);
                else
                    listPeptidesMerged.Add(nodePep.MergeUserInfo(nodePepMatch, Settings, diff));
            }
        }
        listPeptideGroupsMerged.Add(nodePepGroup.ChangeChildrenChecked(listPeptidesMerged));
    }
    // Update the list of peptide groups to add based on what got merged
    // (iterate a copy since the list is modified during the loop)
    foreach (var nodePepGroup in peptideGroupsNew.ToArray())
    {
        var listPeptidesUnmerged = new List<DocNode>();
        foreach (PeptideDocNode nodePep in nodePepGroup.Children)
        {
            if (!setMerged.Contains(nodePep.Key))
                listPeptidesUnmerged.Add(nodePep);
        }
        if (listPeptidesUnmerged.Count == 0)
            peptideGroupsNew.Remove(nodePepGroup);
        else
        {
            peptideGroupsNew[peptideGroupsNew.IndexOfReference(nodePepGroup)] =
                (PeptideGroupDocNode) nodePepGroup.ChangeChildrenChecked(listPeptidesUnmerged);
        }
    }
    return (SrmDocument) ChangeChildrenChecked(listPeptideGroupsMerged);
}
/// <summary>
/// Imports FASTA without progress reporting, discarding the empty-group count.
/// </summary>
public SrmDocument ImportFasta(TextReader reader, bool peptideList,
    IdentityPath to, out IdentityPath firstAdded)
{
    int emptiesIgnored;
    return ImportFasta(reader, null, -1, peptideList, to, out firstAdded, out emptiesIgnored);
}

/// <summary>
/// Imports FASTA with progress reporting, reporting how many empty peptide
/// groups were skipped.
/// </summary>
public SrmDocument ImportFasta(TextReader reader, IProgressMonitor progressMonitor, long lines, bool peptideList,
    IdentityPath to, out IdentityPath firstAdded, out int emptyPeptideGroups)
{
    var importer = new FastaImporter(this, peptideList);
    var imported = importer.Import(reader, progressMonitor, lines);
    emptyPeptideGroups = importer.EmptyPeptideGroupCount;
    IdentityPath nextAdd;
    return AddPeptideGroups(imported, peptideList, to, out firstAdded, out nextAdd);
}

/// <summary>
/// Imports FASTA using a modification matcher; falls back to the plain
/// non-peptide-list import when no matcher is supplied.
/// </summary>
public SrmDocument ImportFasta(TextReader reader, IProgressMonitor progressMonitor, long lines,
    ModificationMatcher matcher, IdentityPath to, out IdentityPath firstAdded, out IdentityPath nextAdded, out int emptiesIgnored)
{
    if (matcher == null)
    {
        nextAdded = null;
        return ImportFasta(reader, progressMonitor, lines, false, to, out firstAdded, out emptiesIgnored);
    }

    var importer = new FastaImporter(this, matcher);
    var imported = importer.Import(reader, progressMonitor, lines);
    emptiesIgnored = importer.EmptyPeptideGroupCount;
    return AddPeptideGroups(imported, true, to, out firstAdded, out nextAdded);
}
/// <summary>
/// Imports a mass list, discarding the iRT, library, error and group outputs.
/// </summary>
public SrmDocument ImportMassList(MassListInputs inputs,
    IdentityPath to,
    out IdentityPath firstAdded)
{
    return ImportMassList(inputs, null, to, out firstAdded,
        out var irtPeptides, out var librarySpectra, out var errorList, out var peptideGroups);
}

/// <summary>
/// Imports a mass list, discarding only the peptide group output.
/// </summary>
public SrmDocument ImportMassList(MassListInputs inputs,
    IdentityPath to,
    out IdentityPath firstAdded,
    out List<MeasuredRetentionTime> irtPeptides,
    out List<SpectrumMzInfo> librarySpectra,
    out List<TransitionImportErrorInfo> errorList)
{
    return ImportMassList(inputs, null, to, out firstAdded,
        out irtPeptides, out librarySpectra, out errorList, out var peptideGroups);
}
/// <summary>
/// Imports a transition list. When the input is recognized as a small molecule
/// transition list, it is handled by <see cref="SmallMoleculeTransitionListCSVReader"/>;
/// otherwise a proteomic <see cref="MassListImporter"/> is used and any new
/// modifications it finds are merged into the document settings.
/// </summary>
/// <param name="inputs">The transition list text to import</param>
/// <param name="progressMonitor">Optional progress monitor (proteomic path only)</param>
/// <param name="to">Insertion location for the imported groups</param>
/// <param name="firstAdded">Set to the path of the first node added (null when a small molecule import fails)</param>
/// <param name="irtPeptides">Receives measured retention times found in the list</param>
/// <param name="librarySpectra">Receives spectra found in the list</param>
/// <param name="errorList">Receives per-row import errors</param>
/// <param name="peptideGroups">Receives the imported peptide groups</param>
public SrmDocument ImportMassList(MassListInputs inputs,
    IProgressMonitor progressMonitor,
    IdentityPath to,
    out IdentityPath firstAdded,
    out List<MeasuredRetentionTime> irtPeptides,
    out List<SpectrumMzInfo> librarySpectra,
    out List<TransitionImportErrorInfo> errorList,
    out List<PeptideGroupDocNode> peptideGroups)
{
    MassListImporter importer = new MassListImporter(this, inputs);
    // Is this a small molecule transition list, or trying to be?
    if (SmallMoleculeTransitionListCSVReader.IsPlausibleSmallMoleculeTransitionList(importer.Inputs.ReadLines()))
    {
        var docNewSmallMolecules = this;
        // Proteomic outputs stay empty on the small molecule path
        irtPeptides = new List<MeasuredRetentionTime>();
        librarySpectra = new List<SpectrumMzInfo>();
        peptideGroups = new List<PeptideGroupDocNode>();
        errorList = new List<TransitionImportErrorInfo>();
        firstAdded = null;
        try
        {
            var reader = new SmallMoleculeTransitionListCSVReader(importer.Inputs.ReadLines());
            docNewSmallMolecules = reader.CreateTargets(this, to, out firstAdded);
        }
        catch (LineColNumberedIoException x)
        {
            errorList.Add(new TransitionImportErrorInfo(x.PlainMessage, x.ColumnIndex, x.LineNumber, null)); // CONSIDER: worth the effort to pull row and column info from error message?
        }
        return docNewSmallMolecules;
    }
    IdentityPath nextAdd;
    peptideGroups = importer.Import(progressMonitor, inputs.InputFilename, out irtPeptides, out librarySpectra, out errorList).ToList();
    var docNew = AddPeptideGroups(peptideGroups, false, to, out firstAdded, out nextAdd);
    var pepModsNew = importer.GetModifications(docNew);
    if (!ReferenceEquals(pepModsNew, Settings.PeptideSettings.Modifications))
    {
        docNew = docNew.ChangeSettings(docNew.Settings.ChangePeptideModifications(mods => pepModsNew));
        docNew.Settings.UpdateDefaultModifications(false);
    }
    return docNew;
}
/// <summary>
/// Adds iRT peptides to the database behind the document's active iRT
/// calculator, merging with existing entries and resolving conflicts.
/// </summary>
/// <param name="irtPeptides">New iRT peptide values to add</param>
/// <param name="overwriteExisting">When a peptide already exists, true keeps the new value, false keeps the existing one</param>
/// <param name="progressMonitor">Progress/cancellation; a cancel yields a null return</param>
/// <returns>A document whose retention time predictor points at the updated database,
/// this document if nothing changed, or null if the merge was cancelled</returns>
/// <exception cref="InvalidDataException">No active iRT calculator, or a peptide appears
/// both as a library entry and as a standard</exception>
public SrmDocument AddIrtPeptides(List<DbIrtPeptide> irtPeptides, bool overwriteExisting, IProgressMonitor progressMonitor)
{
    var retentionTimeRegression = Settings.PeptideSettings.Prediction.RetentionTime;
    if (retentionTimeRegression == null || !(retentionTimeRegression.Calculator is RCalcIrt))
    {
        throw new InvalidDataException(Resources.SrmDocument_AddIrtPeptides_Must_have_an_active_iRT_calculator_to_add_iRT_peptides);
    }
    var calculator = (RCalcIrt) retentionTimeRegression.Calculator;
    string dbPath = calculator.DatabasePath;
    // Open the existing database, or create a fresh one on first use.
    IrtDb db = File.Exists(dbPath) ? IrtDb.GetIrtDb(dbPath, null) : IrtDb.CreateIrtDb(dbPath);
    var oldPeptides = db.GetPeptides().Select(p => new DbIrtPeptide(p)).ToList();
    IList<DbIrtPeptide.Conflict> conflicts;
    var peptidesCombined = DbIrtPeptide.FindNonConflicts(oldPeptides, irtPeptides, progressMonitor, out conflicts);
    if (peptidesCombined == null)
        return null; // cancelled
    foreach (var conflict in conflicts)
    {
        // If old and new peptides are a library entry and a standards entry, throw an error
        // The same peptide must not appear in both places
        if (conflict.NewPeptide.Standard ^ conflict.ExistingPeptide.Standard)
        {
            throw new InvalidDataException(string.Format(Resources.SkylineWindow_AddIrtPeptides_Imported_peptide__0__with_iRT_library_value_is_already_being_used_as_an_iRT_standard_,
                                                         conflict.NewPeptide.ModifiedTarget));
        }
    }
    // Peptides that were already present in the database can be either kept or overwritten
    peptidesCombined.AddRange(conflicts.Select(conflict => overwriteExisting ? conflict.NewPeptide : conflict.ExistingPeptide));
    db = db.UpdatePeptides(peptidesCombined, oldPeptides);
    calculator = calculator.ChangeDatabase(db);
    retentionTimeRegression = retentionTimeRegression.ChangeCalculator(calculator);
    var srmSettings = Settings.ChangePeptidePrediction(pred => pred.ChangeRetentionTime(retentionTimeRegression));
    // Avoid creating a new document when the settings are unchanged.
    if (ReferenceEquals(srmSettings, Settings))
        return this;
    return ChangeSettings(srmSettings);
}
/// <summary>
/// True when the node carries the note added by test code that converts a
/// proteomic document into a small-molecule document.
/// </summary>
public static bool IsConvertedFromProteomicTestDocNode(DocNode node)
{
    if (node == null)
        return false;
    var note = node.Annotations.Note;
    return note != null && note.Contains(RefinementSettings.TestingConvertedFromProteomic);
}
/// <summary>
/// Adds a set of peptide groups to the document at the location described by
/// <paramref name="to"/>. When pasting a peptide list onto an existing
/// (non-FASTA) peptide list, the new peptides are merged into that list.
/// </summary>
/// <param name="peptideGroupsNew">Groups to add</param>
/// <param name="peptideList">True when the groups come from a peptide list paste</param>
/// <param name="to">Target selection path, or null to append at the end</param>
/// <param name="firstAdded">Path to the first node added, or null if nothing was added</param>
/// <param name="nextAdd">Suggested location for a subsequent add operation</param>
/// <returns>A new document, or this document unchanged when there was nothing to add</returns>
public SrmDocument AddPeptideGroups(IEnumerable<PeptideGroupDocNode> peptideGroupsNew,
    bool peptideList, IdentityPath to, out IdentityPath firstAdded, out IdentityPath nextAdd)
{
    // For multiple add operations, make the next addtion at the same location by default
    nextAdd = to;
    var peptideGroupsAdd = peptideGroupsNew.ToList();
    // If there are no new groups to add, as in the case where already added
    // FASTA sequences are pasted, just return this, and a null path.  Callers
    // must handle this case gracefully, e.g. not adding an undo record.
    if (peptideGroupsAdd.Count == 0)
    {
        firstAdded = null;
        return this;
    }
    firstAdded = new IdentityPath(peptideGroupsAdd[0].Id);
    // Add to the end, if no insert node
    if (to == null || to.Depth < (int)Level.MoleculeGroups)
        return (SrmDocument) AddAll(peptideGroupsAdd);
    IdentityPath pathGroup = to.GetPathTo((int)Level.MoleculeGroups);
    // Precalc depth of last identity in the path
    int last = to.Length - 1;
    // If it is a peptide list, allow pasting to children to existing peptide list.
    if (peptideList && !(to.GetIdentity((int)Level.MoleculeGroups) is FastaSequence))
    {
        // PeptideGroupDocNode nodeGroup = (PeptideGroupDocNode) FindNode(pathGroup);
        // Add only peptides not already in this group
        // With explicit modifications, there is now reason to add duplicates,
        // when multiple modified forms are desired.
        HashSet<Peptide> setPeptides = new HashSet<Peptide>();
        // foreach (PeptideDocNode nodePeptide in nodeGroup.Children)
        //     setPeptides.Add(nodePeptide.Peptide);
        // De-duplicate within the pasted list itself.
        List<DocNode> listAdd = new List<DocNode>();
        foreach (PeptideDocNode nodePeptide in peptideGroupsAdd[0].Children)
        {
            if (!setPeptides.Contains(nodePeptide.Peptide))
            {
                listAdd.Add(nodePeptide);
                setPeptides.Add(nodePeptide.Peptide);
            }
        }
        // No modification necessary, if no unique peptides
        if (listAdd.Count == 0)
        {
            firstAdded = null;
            return this;
        }
        // If no peptide was in the selection path, add to the end of the list
        DocNode docNew;
        if (last < (int)Level.Molecules)
            docNew = AddAll(to, listAdd);
        // If one of the peptides was selected, insert before it
        else if (last == (int)Level.Molecules)
            docNew = InsertAll(to, listAdd);
        // Otherise, insert after the peptide of the child that was selected
        else
        {
            nextAdd = FindNextInsertNode(to, (int) Level.Molecules);
            docNew = InsertAll(to.GetPathTo((int)Level.Molecules), listAdd, true);
        }
        // Change the selection path to point to the first peptide pasted.
        firstAdded = new IdentityPath(pathGroup, listAdd[0].Id);
        return (SrmDocument)docNew;
    }
    // Insert the new groups before a selected group
    else if (last == (int)Level.MoleculeGroups)
        return (SrmDocument)InsertAll(pathGroup, peptideGroupsAdd);
    // Or after, if a group child is selected
    else
    {
        nextAdd = FindNextInsertNode(to, (int)Level.MoleculeGroups);
        return (SrmDocument)InsertAll(pathGroup, peptideGroupsAdd, true);
    }
}
/// <summary>
/// Finds the path where a subsequent insert should occur, given the node
/// just inserted: the next sibling at <paramref name="depth"/> when one
/// exists under the same parent, otherwise the parent (or null at the root).
/// </summary>
private IdentityPath FindNextInsertNode(IdentityPath identityPath, int depth)
{
    if (identityPath == null)
        return null;
    // Get the path to the desired level
    while (identityPath.Depth > depth)
        identityPath = identityPath.Parent;
    // Get the index to the node at that level and add 1
    int iNode = FindNodeIndex(identityPath) + 1;
    // If the next node exists, get the path to it
    IdentityPath identityPathNext = null;
    if (iNode < GetCount(depth))
        identityPathNext = GetPathTo(depth, iNode);
    // If no next node was available, or the next node belongs to a new parent
    // return the parent, or null if at the root.
    if (identityPathNext == null || !Equals(identityPath.Parent, identityPathNext.Parent))
        return (depth != 0 ? identityPath.Parent : null);
    // Return the path to the next node.
    return identityPathNext;
}
/// <summary>
/// Determines whether a node may be moved from one location to another:
/// molecule groups may always move; molecules may only move between two
/// peptide lists (never into or out of a FASTA sequence group).
/// </summary>
public bool IsValidMove(IdentityPath from, IdentityPath to)
{
    int lastFrom = from.Length - 1;
    // A molecule group can be relocated anywhere.
    if (lastFrom == (int) Level.MoleculeGroups)
        return true;
    // A molecule may move only from a peptide list to a peptide list.
    return to != null &&
           lastFrom == (int) Level.Molecules &&
           !(from.GetIdentity((int) Level.MoleculeGroups) is FastaSequence) &&
           !(to.GetIdentity((int) Level.MoleculeGroups) is FastaSequence);
}
/// <summary>
/// Moves a node (a molecule group, or a molecule within a peptide list) to a
/// new location in the document.
/// </summary>
/// <param name="from">Path of the node to move</param>
/// <param name="to">Target path (null appends groups at the end)</param>
/// <param name="newLocation">Path of the node at its new location</param>
/// <returns>A new document with the node moved</returns>
/// <exception cref="IdentityNotFoundException">No node at <paramref name="from"/></exception>
/// <exception cref="InvalidOperationException">The source or target is not a valid
/// move endpoint (see <see cref="IsValidMove"/>)</exception>
public SrmDocument MoveNode(IdentityPath from, IdentityPath to, out IdentityPath newLocation)
{
    DocNode nodeFrom = FindNode(from);
    if (nodeFrom == null)
        throw new IdentityNotFoundException(from.Child);
    int lastFrom = from.Length - 1;
    int lastTo = (to == null ? -1 : to.Length - 1);
    // Figure out where actually to put the moving node.
    if (lastFrom == (int)Level.MoleculeGroups)
    {
        SrmDocument document = (SrmDocument)RemoveChild(nodeFrom);
        // If no good target, append
        if (to == null || lastTo == -1)
            document = (SrmDocument)document.Add(nodeFrom);
        // If dropped over a group, insert before
        else if (lastTo == (int)Level.MoleculeGroups)
            document = (SrmDocument)document.Insert(to, nodeFrom);
        // If over the child of a group, insert after
        else
            document = (SrmDocument)document.Insert(to.GetPathTo((int)Level.MoleculeGroups), nodeFrom, true);
        newLocation = new IdentityPath(nodeFrom.Id);
        return document;
    }
    // If moving a peptide that comes from a peptide list
    else if (lastFrom == (int)Level.Molecules)
    {
        // Moves into or out of FASTA sequence groups are not allowed.
        if (from.GetIdentity((int)Level.MoleculeGroups) is FastaSequence)
            throw new InvalidOperationException(Resources.SrmDocument_MoveNode_Invalid_move_source);
        if (to == null || to.GetIdentity((int)Level.MoleculeGroups) is FastaSequence)
            throw new InvalidOperationException(Resources.SrmDocument_MoveNode_Invalid_move_target);
        SrmDocument document = (SrmDocument)RemoveChild(from.Parent, nodeFrom);
        // If dropped over a group, add to the end
        if (lastTo == (int)Level.MoleculeGroups)
            document = (SrmDocument) document.Add(to, nodeFrom);
        // If over a peptide, insert before
        else if (lastTo == (int)Level.Molecules)
            document = (SrmDocument) document.Insert(to, nodeFrom);
        // If over the child of a peptide, insert after
        else
            document = (SrmDocument) document.Insert(to.GetPathTo((int)Level.Molecules), nodeFrom, true);
        newLocation = new IdentityPath(to.GetPathTo((int)Level.MoleculeGroups), nodeFrom.Id);
        return document;
    }
    throw new InvalidOperationException(Resources.SrmDocument_MoveNode_Invalid_move_source);
}
/// <summary>
/// Adds result annotations to a precursor for the results from one file.
/// Returns this document unchanged when the annotations were already present.
/// </summary>
public SrmDocument AddPrecursorResultsAnnotations(IdentityPath groupPath, ChromFileInfoId fileId,
    Dictionary<string, string> annotations)
{
    var nodeGroup = (TransitionGroupDocNode) FindNode(groupPath);
    var annotated = nodeGroup.AddPrecursorAnnotations(fileId, annotations);
    // Avoid churning the document when nothing actually changed.
    return ReferenceEquals(nodeGroup, annotated)
        ? this
        : (SrmDocument) ReplaceChild(groupPath.Parent, annotated);
}
/// <summary>
/// Changes the chosen peak for a precursor to the peak at the given retention
/// time for the specified transition, in one replicate file.
/// </summary>
public SrmDocument ChangePeak(IdentityPath groupPath, string nameSet, MsDataFileUri filePath,
    Identity tranId, double retentionTime, UserSet userSet)
{
    // Delegate to the common ChangePeak plumbing; loadPoints is false because
    // picking by retention time does not require the chromatogram point data.
    return ChangePeak(groupPath, nameSet, filePath, false,
        (node, info, tol, iSet, fileId, reg) =>
            node.ChangePeak(Settings, info, tol, iSet, fileId, reg, tranId, retentionTime, userSet));
}
/// <summary>
/// Changes the integration boundaries of a peak for a precursor in one
/// replicate file. Null start or end time deletes the peak. When
/// <paramref name="identified"/> is null, the identification status is looked
/// up from the library/alignment retention times.
/// </summary>
public SrmDocument ChangePeak(IdentityPath groupPath, string nameSet, MsDataFileUri filePath,
    Transition transition, double? startTime, double? endTime, UserSet userSet, PeakIdentification? identified, bool preserveMissingPeaks)
{
    // If start or end time is null, just assign an arbitrary value to identified -- peak will be deleted anyway
    if (!startTime.HasValue || !endTime.HasValue)
        identified = PeakIdentification.FALSE;
    // If a null identification is passed in (currently only happens from the PeakBoundaryImport function),
    // look up the identification status directly
    if (!identified.HasValue)
    {
        IdentityPath peptidePath = groupPath.Parent;
        var nodePep = (PeptideDocNode) FindNode(peptidePath);
        var nodeGroup = (TransitionGroupDocNode) FindNode(groupPath);
        if (nodeGroup == null)
            throw new IdentityNotFoundException(groupPath.Child);
        var lookupSequence = nodePep.SourceUnmodifiedTarget;
        var lookupMods = nodePep.SourceExplicitMods;
        IsotopeLabelType labelType;
        double[] retentionTimes;
        Settings.TryGetRetentionTimes(lookupSequence, nodeGroup.TransitionGroup.PrecursorAdduct, lookupMods,
                                      filePath, out labelType, out retentionTimes);
        // An ID time inside the new boundaries means the peak is identified.
        if (ContainsTime(retentionTimes, startTime.Value, endTime.Value))
        {
            identified = PeakIdentification.TRUE;
        }
        else
        {
            // Otherwise fall back to retention times aligned from other files.
            var alignedRetentionTimes = Settings.GetAlignedRetentionTimes(filePath,
                lookupSequence, lookupMods);
            identified = ContainsTime(alignedRetentionTimes, startTime.Value, endTime.Value)
                ? PeakIdentification.ALIGNED
                : PeakIdentification.FALSE;
        }
    }
    // loadPoints is true: boundary changes require the chromatogram point data.
    return ChangePeak(groupPath, nameSet, filePath, true,
        (node, info, tol, iSet, fileId, reg) =>
            node.ChangePeak(Settings, info, tol, iSet, fileId, reg, transition, startTime,
                endTime, identified.Value, userSet, preserveMissingPeaks));
}
/// <summary>
/// True when any time in <paramref name="times"/> falls within the closed
/// interval [startTime, endTime]. A null array yields false.
/// </summary>
private bool ContainsTime(double[] times, double startTime, double endTime)
{
    if (times == null)
        return false;
    foreach (var time in times)
    {
        if (startTime <= time && time <= endTime)
            return true;
    }
    return false;
}
// Callback signature used by the ChangePeak overloads: given a precursor node
// and the chromatogram data for one replicate file, produce the updated node.
private delegate DocNode ChangeNodePeak(TransitionGroupDocNode nodeGroup,
    ChromatogramGroupInfo chromInfoGroup, double mzMatchTolerance, int indexSet,
    ChromFileInfoId indexFile, OptimizableRegression regression);
/// <summary>
/// Common plumbing for peak changes: locates the precursor node, the
/// replicate, the file, and the matching chromatograms, then applies the
/// supplied <paramref name="change"/> callback and replaces the node.
/// </summary>
/// <exception cref="IdentityNotFoundException">The peptide or precursor was not found</exception>
/// <exception cref="ArgumentOutOfRangeException">The replicate, file, or chromatogram
/// data could not be located</exception>
private SrmDocument ChangePeak(IdentityPath groupPath, string nameSet, MsDataFileUri filePath, bool loadPoints,
    ChangeNodePeak change)
{
    var groupId = groupPath.Child;
    var nodePep = (PeptideDocNode) FindNode(groupPath.Parent);
    if (nodePep == null)
        throw new IdentityNotFoundException(groupId);
    var nodeGroup = (TransitionGroupDocNode)nodePep.FindNode(groupId);
    if (nodeGroup == null)
        throw new IdentityNotFoundException(groupId);
    // Get the chromatogram set containing the chromatograms of interest
    int indexSet;
    ChromatogramSet chromatograms;
    if (!Settings.HasResults || !Settings.MeasuredResults.TryGetChromatogramSet(nameSet, out chromatograms, out indexSet))
        throw new ArgumentOutOfRangeException(string.Format(Resources.SrmDocument_ChangePeak_No_replicate_named__0__was_found, nameSet));
    // Calculate the file index that supplied the chromatograms
    ChromFileInfoId fileId = chromatograms.FindFile(filePath);
    if (fileId == null)
    {
        throw new ArgumentOutOfRangeException(
            string.Format(Resources.SrmDocument_ChangePeak_The_file__0__was_not_found_in_the_replicate__1__,
                          filePath, nameSet));
    }
    // Get all chromatograms for this transition group
    double mzMatchTolerance = Settings.TransitionSettings.Instrument.MzMatchTolerance;
    ChromatogramGroupInfo[] arrayChromInfo;
    if (!Settings.MeasuredResults.TryLoadChromatogram(chromatograms, nodePep, nodeGroup,
                                                      (float) mzMatchTolerance, loadPoints, out arrayChromInfo))
    {
        throw new ArgumentOutOfRangeException(string.Format(Resources.SrmDocument_ChangePeak_No_results_found_for_the_precursor__0__in_the_replicate__1__,
                                                            TransitionGroupTreeNode.GetLabel(nodeGroup.TransitionGroup, nodeGroup.PrecursorMz, string.Empty), nameSet));
    }
    // Get the chromatograms for only the file of interest
    int indexInfo = arrayChromInfo.IndexOf(info => Equals(filePath, info.FilePath));
    if (indexInfo == -1)
    {
        throw new ArgumentOutOfRangeException(string.Format(Resources.SrmDocument_ChangePeak_No_results_found_for_the_precursor__0__in_the_file__1__,
                                                            TransitionGroupTreeNode.GetLabel(nodeGroup.TransitionGroup, nodeGroup.PrecursorMz, string.Empty), filePath));
    }
    var chromInfoGroup = arrayChromInfo[indexInfo];
    var nodeGroupNew = change(nodeGroup, chromInfoGroup, mzMatchTolerance, indexSet, fileId,
        chromatograms.OptimizationFunction);
    // Avoid creating a new document when the callback made no change.
    if (ReferenceEquals(nodeGroup, nodeGroupNew))
        return this;
    return (SrmDocument)ReplaceChild(groupPath.Parent, nodeGroupNew);
}
/// <summary>
/// Changes the explicit modifications on a peptide in place (no copy made).
/// Convenience overload of the four-argument form.
/// </summary>
public SrmDocument ChangePeptideMods(IdentityPath peptidePath, ExplicitMods mods,
    IList<StaticMod> listGlobalStaticMods, IList<StaticMod> listGlobalHeavyMods)
{
    const bool createCopy = false;
    return ChangePeptideMods(peptidePath, mods, createCopy, listGlobalStaticMods, listGlobalHeavyMods);
}
/// <summary>
/// Changes the explicit modifications on a peptide, optionally inserting a
/// modified copy of the peptide instead of replacing it, and keeps the
/// document's modification settings in sync with the change.
/// </summary>
/// <param name="peptidePath">Path to the peptide being modified</param>
/// <param name="mods">New explicit modifications, or null to clear them</param>
/// <param name="createCopy">True to insert a copy of the peptide with the new
/// modifications rather than replacing the original</param>
/// <param name="listGlobalStaticMods">Global static modification definitions</param>
/// <param name="listGlobalHeavyMods">Global heavy modification definitions</param>
/// <exception cref="IdentityNotFoundException">No peptide at <paramref name="peptidePath"/></exception>
public SrmDocument ChangePeptideMods(IdentityPath peptidePath, ExplicitMods mods, bool createCopy,
    IList<StaticMod> listGlobalStaticMods, IList<StaticMod> listGlobalHeavyMods)
{
    var docResult = this;
    var pepMods = docResult.Settings.PeptideSettings.Modifications;
    var nodePeptide = (PeptideDocNode)FindNode(peptidePath);
    if (nodePeptide == null)
        throw new IdentityNotFoundException(peptidePath.Child);
    // Make sure modifications are in synch with global values
    if (mods != null)
    {
        mods = mods.ChangeGlobalMods(listGlobalStaticMods, listGlobalHeavyMods,
            pepMods.GetHeavyModificationTypes().ToArray());
    }
    // If modifications have changed, update the peptide.
    var modsPep = nodePeptide.ExplicitMods;
    if (createCopy || !Equals(mods, modsPep))
    {
        // Update the peptide to the new explicit modifications
        // Change the explicit modifications, and force a settings update through the peptide
        // to all of its children.
        // CONSIDER: This is not really the right SrmSettings object to be using for this
        //           update, but constructing the right one currently depends on the
        //           peptide being added to the document.  Doesn't seem like the potential
        //           changes would have any impact on this operation, though.
        if (createCopy)
        {
            // Build a fresh peptide node (new Peptide identity, no results or
            // annotations) and insert it next to the original.
            nodePeptide = new PeptideDocNode((Peptide)nodePeptide.Peptide.Copy(),
                                             Settings,
                                             nodePeptide.ExplicitMods,
                                             nodePeptide.SourceKey,
                                             nodePeptide.GlobalStandardType,
                                             nodePeptide.Rank,
                                             nodePeptide.ExplicitRetentionTime,
                                             Annotations.EMPTY,
                                             null,   // Results
                                             nodePeptide.Children.ToList().ConvertAll(node => (TransitionGroupDocNode)node).ToArray(),
                                             nodePeptide.AutoManageChildren);
            nodePeptide = nodePeptide.ChangeExplicitMods(mods).ChangeSettings(Settings, SrmSettingsDiff.ALL);
            docResult = (SrmDocument)docResult.Insert(peptidePath, nodePeptide, true);
        }
        else
        {
            nodePeptide = nodePeptide.ChangeExplicitMods(mods).ChangeSettings(Settings, SrmSettingsDiff.ALL);
            docResult = (SrmDocument)docResult.ReplaceChild(peptidePath.Parent, nodePeptide);
        }
        // Turn off auto-manage children for the peptide group if it is a FASTA sequence,
        // because the child lists the FASTA sequence will create will not contain this manually
        // altered peptide.
        var nodePepGroup = (PeptideGroupDocNode)docResult.FindNode(peptidePath.Parent);
        if (!nodePepGroup.IsPeptideList)
        {
            // Make sure peptides are ranked correctly
            var childrenNew = PeptideGroup.RankPeptides(nodePepGroup.Children, docResult.Settings, false);
            docResult = (SrmDocument)docResult.ReplaceChild(nodePepGroup
                .ChangeAutoManageChildren(false)
                .ChangeChildrenChecked(childrenNew));
        }
    }
    var pepModsNew = pepMods.DeclareExplicitMods(docResult, listGlobalStaticMods, listGlobalHeavyMods);
    if (Equals(pepModsNew, pepMods))
        return docResult;
    // Make sure any newly included modifications are added to the settings
    var settings = docResult.Settings.ChangePeptideModifications(m => pepModsNew);
    return docResult.ChangeSettings(settings);
}
/// <summary>
/// Searches the document for text starting at the given path, returning the
/// path of the matching node, or null when no match is found.
/// </summary>
public IdentityPath SearchDocumentForString(IdentityPath identityPath, string text, DisplaySettings settings, bool reverse, bool caseSensitive)
{
    var options = new FindOptions()
        .ChangeText(text)
        .ChangeForward(!reverse)
        .ChangeCaseSensitive(caseSensitive);
    var result = SearchDocument(new Bookmark(identityPath), options, settings);
    return result == null ? null : result.Bookmark.IdentityPath;
}
/// <summary>
/// Searches the document starting at a bookmark, in the direction requested
/// by the find options, returning the first match or null.
/// </summary>
public FindResult SearchDocument(Bookmark startPath, FindOptions findOptions, DisplaySettings settings)
{
    var enumerator = new BookmarkEnumerator(this, startPath);
    enumerator.Forward = findOptions.Forward;
    return FindNext(enumerator, findOptions, settings);
}
/// <summary>
/// Advances the bookmark enumerator to the next node matching the find
/// options, returning the match or null when none remains.
/// </summary>
private static FindResult FindNext(BookmarkEnumerator bookmarkEnumerator, FindOptions findOptions, DisplaySettings settings)
{
    return new FindPredicate(findOptions, settings).FindNext(bookmarkEnumerator);
}
/// <summary>
/// Sets the global standard type on the selected peptides. Decoys,
/// non-peptide selections, and peptides already at the requested type are
/// left untouched.
/// </summary>
public SrmDocument ChangeStandardType(StandardType standardType, IEnumerable<IdentityPath> selPaths)
{
    // Collect one replacement per peptide whose standard type must change.
    var replacements = new List<NodeReplacement>();
    foreach (IdentityPath nodePath in selPaths)
    {
        var nodePep = FindNode(nodePath) as PeptideDocNode;
        if (nodePep == null || nodePep.IsDecoy)
            continue;
        if (Equals(standardType, nodePep.GlobalStandardType))
            continue;
        replacements.Add(new NodeReplacement(nodePath.Parent, nodePep.ChangeStandardType(standardType)));
    }
    // Apply all replacements in a single pass over the document tree.
    return (SrmDocument) ReplaceChildren(replacements);
}
/// <summary>
/// Lazily enumerates the molecules whose global standard type is the
/// surrogate standard type.
/// </summary>
public IEnumerable<PeptideDocNode> GetSurrogateStandards()
{
    foreach (var molecule in Molecules)
    {
        if (Equals(molecule.GlobalStandardType, StandardType.SURROGATE_STANDARD))
            yield return molecule;
    }
}
/// <summary>
/// Returns a clone of this document with settings-change processing deferred,
/// so multiple edits can be batched before <see cref="EndDeferSettingsChanges"/>.
/// </summary>
public SrmDocument BeginDeferSettingsChanges()
{
    return ChangeProp(ImClone(this), im => im.DeferSettingsChanges = true);
}
/// <summary>
/// Ends a deferred-settings edit session: replays the accumulated settings
/// change against the original document's state, then restores this
/// document's children. The statement order here is significant.
/// </summary>
/// <param name="originalDocument">The document as it was when deferral began</param>
/// <param name="progressMonitor">Progress/cancellation for the settings change</param>
public SrmDocument EndDeferSettingsChanges(SrmDocument originalDocument, SrmSettingsChangeMonitor progressMonitor)
{
    // Start from a document that has the ORIGINAL settings and children,
    // but with deferral turned off, so the settings diff is computed fully.
    var docWithOriginalSettings = (SrmDocument) ChangeProp(ImClone(this), im =>
    {
        im.Settings = originalDocument.Settings;
        im.DeferSettingsChanges = false;
    }).ChangeChildren(originalDocument.Children);
    // Re-apply the current settings and measured results as one real change.
    var doc = docWithOriginalSettings
        .ChangeSettings(Settings, progressMonitor)
        .ChangeMeasuredResults(Settings.MeasuredResults, progressMonitor);
    // Finally restore the children edited while changes were deferred.
    doc = (SrmDocument) doc.ChangeChildren(Children.ToArray());
    return doc;
}
// Identity token; replaced on every clone (see ImmutableClone) so each
// document instance gets a distinct value.
private object _referenceId = new object();
/// <summary>
/// Value which is unique to this instance of the SrmDocument.
/// This enables you to determine whether another SrmDocument is ReferenceEquals to this, without
/// having to hold onto a reference to this.
/// <see cref="pwiz.Skyline.Model.Databinding.CachedValue{T}"/>
/// </summary>
public object ReferenceId { get { return _referenceId; } }
/// <summary>
/// Clones the document and gives the clone a fresh <see cref="ReferenceId"/>,
/// so clones are distinguishable from the instance they were cloned from.
/// </summary>
protected override object ImmutableClone()
{
    SrmDocument document = (SrmDocument) base.ImmutableClone();
    document._referenceId = new object();
    return document;
}
#region Implementation of IXmlSerializable
/// <summary>
/// For deserialization: creates an empty document that expects a subsequent
/// <see cref="ReadXml"/> call to populate settings and children.
/// </summary>
// ReSharper disable UnusedMember.Local
private SrmDocument()
// ReSharper restore UnusedMember.Local
    : base(new SrmDocumentId())
{
}
/// <summary>
/// Deserializes document from XML. May only be called once on a freshly
/// constructed (settings-less) instance.
/// </summary>
/// <param name="reader">The reader positioned at the document start tag</param>
/// <exception cref="InvalidOperationException">The document already has settings,
/// i.e. ReadXml was called on an initialized instance</exception>
public void ReadXml(XmlReader reader)
{
    if (Settings != null)
    {
        throw new InvalidOperationException();
    }
    var documentReader = new DocumentReader();
    documentReader.ReadXml(reader);
    FormatVersion = documentReader.FormatVersion;
    Settings = documentReader.Settings;
    if (documentReader.Children == null)
        SetChildren(new PeptideGroupDocNode[0]);
    else
    {
        var children = documentReader.Children;
        // Make sure peptide standards lists are up to date
        Settings = Settings.CachePeptideStandards(new PeptideGroupDocNode[0], children);
        SetChildren(UpdateResultsSummaries(children, new Dictionary<int, PeptideDocNode>()));
        IsProteinMetadataPending = CalcIsProteinMetadataPending();  // Background loaders are about to kick in, they need this info.
    }
    SetDocumentType(); // Note proteomic vs small_molecules vs mixed
    // Older documents may have no audit log; guarantee a non-null one.
    AuditLog = AuditLog ?? new AuditLogList();
}
/// <summary>
/// Reads the audit log that accompanies the document file. When the hash
/// recorded in the log does not match the document's hash, a default
/// (or "undocumented change") entry is prepended to account for the gap.
/// </summary>
/// <param name="documentPath">Path of the document whose audit log to read</param>
/// <param name="expectedSkylineDocumentHash">Hash of the document as loaded</param>
/// <param name="getDefaultEntry">Supplies the entry to record when the hashes differ; may return null</param>
/// <returns>This document with its hash and audit log set</returns>
public SrmDocument ReadAuditLog(string documentPath, string expectedSkylineDocumentHash, Func<AuditLogEntry> getDefaultEntry)
{
    var auditLog = new AuditLogList();
    var auditLogPath = GetAuditLogPath(documentPath);
    if (File.Exists(auditLogPath))
    {
        if (AuditLogList.ReadFromFile(auditLogPath, out var loggedSkylineDocumentHash, out var auditLogList))
        {
            auditLog = auditLogList;
            if (expectedSkylineDocumentHash != loggedSkylineDocumentHash)
            {
                // The document changed outside of audit logging; record that.
                var entry = getDefaultEntry() ?? AuditLogEntry.CreateUndocumentedChangeEntry();
                auditLog = new AuditLogList(entry.ChangeParent(auditLog.AuditLogEntries));
            }
        }
    }
    return ChangeDocumentHash(expectedSkylineDocumentHash).ChangeAuditLog(auditLog);
}
/// <summary>
/// IXmlSerializable implementation: serializes the document in the current
/// Skyline format with no progress reporting.
/// </summary>
public void WriteXml(XmlWriter writer)
{
    SerializeToXmlWriter(writer, SkylineVersion.CURRENT, null, null);
}
/// <summary>
/// Serializes the document (with annotations updated first) to an XML writer
/// in the format of the given Skyline version, optionally reporting progress
/// as transitions are written.
/// </summary>
public void SerializeToXmlWriter(XmlWriter writer, SkylineVersion skylineVersion, IProgressMonitor progressMonitor,
    IProgressStatus progressStatus)
{
    var document = DocumentAnnotationUpdater.UpdateAnnotations(this, progressMonitor, progressStatus);
    var documentWriter = new DocumentWriter(document, skylineVersion);
    if (progressMonitor != null)
    {
        // Progress is proportional to the number of transitions written so far.
        int transitionsWritten = 0;
        int totalTransitionCount = MoleculeTransitionCount;
        documentWriter.WroteTransitions += count =>
        {
            transitionsWritten += count;
            progressStatus = progressStatus.UpdatePercentCompleteProgress(progressMonitor, transitionsWritten, totalTransitionCount);
        };
    }
    documentWriter.WriteXml(writer);
}
/// <summary>
/// Computes the audit log path for a document path: the same directory and
/// base name with the audit log extension. Null/empty input passes through;
/// a path with no directory yields null.
/// </summary>
public static string GetAuditLogPath(string docPath)
{
    if (string.IsNullOrEmpty(docPath))
        return docPath;
    var directory = Path.GetDirectoryName(docPath);
    if (directory == null)
        return null;
    return Path.Combine(directory, Path.GetFileNameWithoutExtension(docPath) + AuditLogList.EXT);
}
/// <summary>
/// Writes the document to a temporary file (hashing the bytes as they are
/// written) and then writes or removes the companion audit log file depending
/// on whether audit logging is enabled.
/// </summary>
/// <param name="tempName">Path of the temporary file to write the XML to</param>
/// <param name="displayName">Final document path, used for progress text and the audit log path</param>
/// <param name="skylineVersion">Document format version to write</param>
/// <param name="progressMonitor">Optional progress reporting</param>
public void SerializeToFile(string tempName, string displayName, SkylineVersion skylineVersion, IProgressMonitor progressMonitor)
{
    string hash;
    using (var writer = new XmlTextWriter(HashingStream.CreateWriteStream(tempName), Encoding.UTF8)
    {
        Formatting = Formatting.Indented
    })
    {
        hash = Serialize(writer, displayName, skylineVersion, progressMonitor);
    }
    var auditLogPath = GetAuditLogPath(displayName);
    if (Settings.DataSettings.AuditLogging)
        AuditLog?.WriteToFile(auditLogPath, hash);
    // Logging disabled: remove any stale audit log so it cannot get out of
    // sync with the document.
    else if (File.Exists(auditLogPath))
        Helpers.TryTwice(() => File.Delete(auditLogPath));
}
/// <summary>
/// Writes the full XML document (srm_settings root element) to the writer and
/// returns the content hash when the writer wraps a <see cref="HashingStream"/>,
/// otherwise null.
/// </summary>
public string Serialize(XmlTextWriter writer, string displayName, SkylineVersion skylineVersion, IProgressMonitor progressMonitor)
{
    writer.WriteStartDocument();
    writer.WriteStartElement(@"srm_settings");
    SerializeToXmlWriter(writer, skylineVersion, progressMonitor, new ProgressStatus(Path.GetFileName(displayName)));
    writer.WriteEndElement();
    writer.WriteEndDocument();
    writer.Flush();
    // Null when the underlying stream is not a HashingStream.
    return ((HashingStream) writer.BaseStream)?.Done();
}
/// <summary>
/// IXmlSerializable contract: returning null indicates no schema is provided.
/// </summary>
public XmlSchema GetSchema()
{
    return null;
}
/// <summary>
/// Validates the results (chromatogram info) on every molecule, precursor,
/// and transition against the document settings. Throws on inconsistency.
/// </summary>
public void ValidateResults()
{
    foreach (PeptideDocNode nodePep in Molecules)
    {
        ValidateChromInfo(Settings, nodePep.Results);
        foreach (TransitionGroupDocNode nodeGroup in nodePep.Children)
        {
            ValidateChromInfo(Settings, nodeGroup.Results);
            foreach (TransitionDocNode nodeTran in nodeGroup.Transitions)
            {
                ValidateChromInfo(Settings, nodeTran.Results);
            }
        }
    }
}
/// <summary>
/// Validates one node's results against the settings: a document without
/// replicates must have no results; with replicates, non-null results are
/// validated (null results are tolerated, see comment below).
/// </summary>
private static void ValidateChromInfo<TInfo>(SrmSettings settings, Results<TInfo> results)
    where TInfo : ChromInfo
{
    if (!settings.HasResults)
    {
        if (results != null)
            throw new InvalidDataException(Resources.SrmDocumentValidateChromInfoResults_found_in_document_with_no_replicates);
        return;
    }
    // This check was a little too agressive.
    // If a node's transition count is zero, then it can still have null for results.
    // if (results == null)
    //     throw new InvalidDataException("DocNode missing results in document with replicates.");
    if (results != null)
        results.Validate(settings);
}
/// <summary>
/// Collision energy for a transition (or precursor when <paramref name="nodeTran"/>
/// is null) using the document's CE regression, at the given optimization step.
/// </summary>
public double GetCollisionEnergy(PeptideDocNode nodePep, TransitionGroupDocNode nodeGroup, TransitionDocNode nodeTran, int step)
{
    return GetCollisionEnergy(Settings, nodePep, nodeGroup, nodeTran,
        Settings.TransitionSettings.Prediction.CollisionEnergy, step);
}
/// <summary>
/// Computes collision energy: an explicit CE on the transition wins; for a
/// precursor (null transition), the children's explicit CE is used when they
/// all agree; otherwise the regression is consulted, offset by step size.
/// With no regression, a missing explicit value yields 0.
/// </summary>
public static double GetCollisionEnergy(SrmSettings settings, PeptideDocNode nodePep,
    TransitionGroupDocNode nodeGroup, TransitionDocNode nodeTran, CollisionEnergyRegression regression, int step)
{
    var ce = nodeTran == null // If we're only given a precursor, use the explicit CE of its children if they all agree
        ? (nodeGroup.Children.Any() && nodeGroup.Children.All(node => ((TransitionDocNode)node).ExplicitValues.CollisionEnergy == ((TransitionDocNode)nodeGroup.Children.First()).ExplicitValues.CollisionEnergy)
            ? ((TransitionDocNode)nodeGroup.Children.First()).ExplicitValues.CollisionEnergy : null)
        : nodeTran.ExplicitValues.CollisionEnergy;
    if (regression != null)
    {
        if (!ce.HasValue)
        {
            // No explicit value anywhere: calculate from charge and m/z.
            var charge = nodeGroup.TransitionGroup.PrecursorAdduct;
            var mz = settings.GetRegressionMz(nodePep, nodeGroup);
            ce = regression.GetCollisionEnergy(charge, mz);
        }
        return ce.Value + regression.StepSize * step;
    }
    return ce ?? 0.0;
}
/// <summary>
/// Optimized collision energy for a transition: first from the optimization
/// library (when configured), then from optimization results using the CE
/// regression; null when no optimization applies.
/// </summary>
public double? GetOptimizedCollisionEnergy(PeptideDocNode nodePep, TransitionGroupDocNode nodeGroup, TransitionDocNode nodeTransition)
{
    var prediction = Settings.TransitionSettings.Prediction;
    var methodType = prediction.OptimizedMethodType;
    var lib = prediction.OptimizedLibrary;
    if (lib != null && !lib.IsNone)
    {
        // Library values take precedence over values derived from results.
        var optimization = lib.GetOptimization(OptimizationType.collision_energy,
            Settings.GetSourceTarget(nodePep), nodeGroup.PrecursorAdduct,
            nodeTransition.FragmentIonName, nodeTransition.Transition.Adduct);
        if (optimization != null)
        {
            return optimization.Value;
        }
    }
    if (prediction.OptimizedMethodType != OptimizedMethodType.None)
    {
        return OptimizationStep<CollisionEnergyRegression>.FindOptimizedValue(Settings,
            nodePep, nodeGroup, nodeTransition, methodType, prediction.CollisionEnergy,
            GetCollisionEnergy);
    }
    return null;
}
/// <summary>
/// Declustering potential for a transition using the document's DP
/// regression, at the given optimization step.
/// </summary>
public double GetDeclusteringPotential(PeptideDocNode nodePep,
    TransitionGroupDocNode nodeGroup, TransitionDocNode nodeTran, int step)
{
    return GetDeclusteringPotential(Settings, nodePep, nodeGroup, nodeTran,
        Settings.TransitionSettings.Prediction.DeclusteringPotential, step);
}
/// <summary>
/// Computes declustering potential: an explicitly set DP on the transition
/// overrides the calculation; otherwise the regression is evaluated at the
/// precursor's regression m/z and offset by whole steps. No regression yields 0.
/// </summary>
public static double GetDeclusteringPotential(SrmSettings settings, PeptideDocNode nodePep,
    TransitionGroupDocNode nodeGroup, TransitionDocNode nodeTran, DeclusteringPotentialRegression regression, int step)
{
    // Use the null-safe accessor for BOTH the check and the value. The
    // original checked ExplicitTransitionValues.Get(nodeTran) but then read
    // nodeTran.ExplicitValues directly, which could dereference a null
    // nodeTran and lets the two reads disagree.
    var explicitDP = ExplicitTransitionValues.Get(nodeTran).DeclusteringPotential;
    if (explicitDP.HasValue)
        return explicitDP.Value; // Explicitly set, overrides calculation
    if (regression == null)
        return 0;
    double mz = settings.GetRegressionMz(nodePep, nodeGroup);
    return regression.GetDeclustringPotential(mz) + regression.StepSize * step;
}
/// <summary>
/// Optimized declustering potential for a transition, derived from
/// optimization results using the document's DP regression.
/// </summary>
public double GetOptimizedDeclusteringPotential(PeptideDocNode nodePep, TransitionGroupDocNode nodeGroup, TransitionDocNode nodeTransition)
{
    var prediction = Settings.TransitionSettings.Prediction;
    var methodType = prediction.OptimizedMethodType;
    var regression = prediction.DeclusteringPotential;
    return OptimizationStep<DeclusteringPotentialRegression>.FindOptimizedValue(Settings,
        nodePep, nodeGroup, nodeTransition, methodType, regression, GetDeclusteringPotential);
}
/// <summary>
/// Enumerates labels of precursors that have no compensation voltage at the
/// given tuning level — neither explicit, nor in the optimization library,
/// nor derivable from optimization results.
/// </summary>
public IEnumerable<string> GetMissingCompensationVoltages(CompensationVoltageParameters.Tuning tuneLevel)
{
    if (tuneLevel.Equals(CompensationVoltageParameters.Tuning.none))
        yield break;
    var lib = Settings.HasOptimizationLibrary
        ? Settings.TransitionSettings.Prediction.OptimizedLibrary
        : null;
    var optType = CompensationVoltageParameters.GetOptimizationType(tuneLevel);
    foreach (var nodePep in Molecules)
    {
        foreach (var nodeTranGroup in nodePep.TransitionGroups.Where(nodeGroup => nodeGroup.Children.Any()))
        {
            // NOTE(review): these two 'break' statements stop checking ALL
            // remaining precursors of the peptide once one has a CoV value;
            // 'continue' (skip just this precursor) may be what was intended —
            // confirm before changing.
            if (nodeTranGroup.ExplicitValues.CompensationVoltage.HasValue)
                break;
            if (lib != null && !lib.IsNone && lib.GetOptimization(optType, Settings.GetSourceTarget(nodePep),
                    nodeTranGroup.PrecursorAdduct) != null)
                break;
            // Try to derive a CoV from optimization results at the requested level.
            double? cov;
            switch (tuneLevel)
            {
                case CompensationVoltageParameters.Tuning.fine:
                    cov = OptimizationStep<CompensationVoltageRegressionFine>.FindOptimizedValueFromResults(
                        Settings, nodePep, nodeTranGroup, null, OptimizedMethodType.Precursor, GetCompensationVoltageFine);
                    break;
                case CompensationVoltageParameters.Tuning.medium:
                    cov = OptimizationStep<CompensationVoltageRegressionMedium>.FindOptimizedValueFromResults(
                        Settings, nodePep, nodeTranGroup, null, OptimizedMethodType.Precursor, GetCompensationVoltageMedium);
                    break;
                default:
                    cov = OptimizationStep<CompensationVoltageRegressionRough>.FindOptimizedValueFromResults(
                        Settings, nodePep, nodeTranGroup, null, OptimizedMethodType.Precursor, GetCompensationVoltageRough);
                    break;
            }
            if (!cov.HasValue || cov.Value.Equals(0))
            {
                yield return nodeTranGroup.ToString();
            }
        }
    }
}
/// <summary>
/// Returns the highest compensation voltage tuning level available to the
/// document, from the optimization library or from imported results
/// (fine > medium > rough > none).
/// </summary>
public CompensationVoltageParameters.Tuning HighestCompensationVoltageTuning()
{
    if (Settings.HasOptimizationLibrary)
    {
        // Optimization library may contain fine tune CoV values
        if (Settings.TransitionSettings.Prediction.OptimizedLibrary.HasType(OptimizationType.compensation_voltage_fine))
            return CompensationVoltageParameters.Tuning.fine;
    }
    // Get highest tune level imported
    var highestTuneLevel = CompensationVoltageParameters.Tuning.none;
    if (Settings.HasResults)
    {
        foreach (var chromatogram in Settings.MeasuredResults.Chromatograms)
        {
            if (chromatogram.OptimizationFunction == null)
                continue;
            var optType = chromatogram.OptimizationFunction.OptType;
            // Fine is the maximum: return immediately when found.
            if (highestTuneLevel < CompensationVoltageParameters.Tuning.fine &&
                OptimizationType.compensation_voltage_fine.Equals(optType))
            {
                return CompensationVoltageParameters.Tuning.fine;
            }
            else if (highestTuneLevel < CompensationVoltageParameters.Tuning.medium &&
                     OptimizationType.compensation_voltage_medium.Equals(optType))
            {
                highestTuneLevel = CompensationVoltageParameters.Tuning.medium;
            }
            else if (highestTuneLevel < CompensationVoltageParameters.Tuning.rough &&
                     OptimizationType.compensation_voltage_rough.Equals(optType))
            {
                highestTuneLevel = CompensationVoltageParameters.Tuning.rough;
            }
        }
    }
    return highestTuneLevel;
}
/// <summary>
/// Enumerates labels of precursors (with more than one transition) that have
/// no rank-1 transition in the results.
/// </summary>
/// <param name="primaryTransitionCount">When greater than zero, ranking is done
/// against the primary results group for the precursor</param>
/// <param name="schedulingReplicateIndex">Replicate to rank against, or null</param>
public IEnumerable<string> GetPrecursorsWithoutTopRank(int primaryTransitionCount, int? schedulingReplicateIndex)
{
    foreach (var seq in MoleculeGroups)
    {
        foreach (PeptideDocNode nodePep in seq.Children)
        {
            foreach (TransitionGroupDocNode nodeGroup in nodePep.Children.Where(node => ((TransitionGroupDocNode)node).TransitionCount > 1))
            {
                // The primary results group depends only on the precursor, so
                // compute it once here instead of once per transition as the
                // original code did (loop-invariant hoist; GetPrimaryResultsGroup
                // is assumed to be a pure lookup).
                var groupPrimary = primaryTransitionCount > 0
                    ? nodePep.GetPrimaryResultsGroup(nodeGroup)
                    : null;
                bool rankOne = false;
                foreach (TransitionDocNode nodeTran in nodeGroup.Children)
                {
                    int? rank = nodeGroup.GetRank(groupPrimary, nodeTran, schedulingReplicateIndex);
                    if (rank.HasValue && rank == 1)
                    {
                        rankOne = true;
                        break;
                    }
                }
                if (!rankOne)
                {
                    yield return nodeGroup.ToString();
                }
            }
        }
    }
}
/// <summary>
/// Compensation voltage for a precursor at the given optimization step,
/// dispatching to the rough/medium/fine calculation for the tuning level.
/// </summary>
public double GetCompensationVoltage(PeptideDocNode nodePep, TransitionGroupDocNode nodeGroup, TransitionDocNode nodeTran, int step, CompensationVoltageParameters.Tuning tuneLevel)
{
    var cov = Settings.TransitionSettings.Prediction.CompensationVoltage;
    switch (tuneLevel)
    {
        case CompensationVoltageParameters.Tuning.fine:
            return GetCompensationVoltageFine(Settings, nodePep, nodeGroup, nodeTran, cov, step);
        case CompensationVoltageParameters.Tuning.medium:
            return GetCompensationVoltageMedium(Settings, nodePep, nodeGroup, nodeTran, cov, step);
        default:
            return GetCompensationVoltageRough(Settings, nodePep, nodeGroup, nodeTran, cov, step);
    }
}
private static double GetCompensationVoltageRough(SrmSettings settings, PeptideDocNode nodePep,
TransitionGroupDocNode nodeGroup, TransitionDocNode nodeTran, CompensationVoltageParameters regression, int step)
{
if (regression == null)
return 0;
return (regression.MinCov + regression.MaxCov)/2 + regression.StepSizeRough*step;
}
private static double GetCompensationVoltageMedium(SrmSettings settings, PeptideDocNode nodePep,
TransitionGroupDocNode nodeGroup, TransitionDocNode nodeTran, CompensationVoltageParameters regression, int step)
{
if (regression == null)
return 0;
double? covRough = OptimizationStep<CompensationVoltageRegressionRough>.FindOptimizedValueFromResults(settings,
nodePep, nodeGroup, null, OptimizedMethodType.Precursor, GetCompensationVoltageRough);
return covRough.HasValue ? covRough.Value + regression.StepSizeMedium*step : 0;
}
public static double GetCompensationVoltageFine(SrmSettings settings, PeptideDocNode nodePep,
TransitionGroupDocNode nodeGroup, TransitionDocNode nodeTran, CompensationVoltageParameters regression, int step)
{
if (regression == null)
return 0;
double? covMedium = OptimizationStep<CompensationVoltageRegressionMedium>.FindOptimizedValueFromResults(settings,
nodePep, nodeGroup, null, OptimizedMethodType.Precursor, GetCompensationVoltageMedium);
return covMedium.HasValue ? covMedium.Value + regression.StepSizeFine*step : 0;
}
public double? GetOptimizedCompensationVoltage(PeptideDocNode nodePep, TransitionGroupDocNode nodeGroup, CompensationVoltageParameters.Tuning tuneLevel)
{
if (nodeGroup.ExplicitValues.CompensationVoltage.HasValue)
return nodeGroup.ExplicitValues.CompensationVoltage.Value;
var prediction = Settings.TransitionSettings.Prediction;
var lib = prediction.OptimizedLibrary;
if (lib != null && !lib.IsNone)
{
var optimization = lib.GetOptimization(CompensationVoltageParameters.GetOptimizationType(tuneLevel),
Settings.GetSourceTarget(nodePep), nodeGroup.PrecursorAdduct);
if (optimization != null)
return optimization.Value;
}
var covMain = prediction.CompensationVoltage;
if (covMain == null)
return null;
switch (tuneLevel)
{
case CompensationVoltageParameters.Tuning.fine:
return OptimizationStep<CompensationVoltageRegressionFine>.FindOptimizedValue(Settings, nodePep,
nodeGroup, null, OptimizedMethodType.Precursor, covMain.RegressionFine,
GetCompensationVoltageFine);
case CompensationVoltageParameters.Tuning.medium:
return OptimizationStep<CompensationVoltageRegressionMedium>.FindOptimizedValue(Settings, nodePep,
nodeGroup, null, OptimizedMethodType.Precursor, covMain.RegressionMedium,
GetCompensationVoltageMedium);
case CompensationVoltageParameters.Tuning.rough:
return OptimizationStep<CompensationVoltageRegressionRough>.FindOptimizedValue(Settings, nodePep,
nodeGroup, null, OptimizedMethodType.Precursor, covMain.RegressionRough,
GetCompensationVoltageRough);
}
return null;
}
#endregion
#region object overrides
public bool Equals(SrmDocument obj)
{
if (ReferenceEquals(null, obj)) return false;
if (ReferenceEquals(this, obj)) return true;
if (!base.Equals(obj))
return false;
if (!Equals(obj.Settings, Settings))
return false;
if (!Equals(obj.DeferSettingsChanges, DeferSettingsChanges))
return false;
return true;
}
public override bool Equals(object obj)
{
if (ReferenceEquals(null, obj)) return false;
if (ReferenceEquals(this, obj)) return true;
return Equals(obj as SrmDocument);
}
public override int GetHashCode()
{
unchecked
{
return (base.GetHashCode()*397) ^ Settings.GetHashCode();
}
}
#endregion
}
public class SrmDocumentPair : ObjectPair<SrmDocument>
{
protected SrmDocumentPair(SrmDocument oldDoc, SrmDocument newDoc, SrmDocument.DOCUMENT_TYPE defaultDocumentTypeForAuditLog)
: base(oldDoc, newDoc)
{
NewDocumentType = newDoc != null && newDoc.DocumentType != SrmDocument.DOCUMENT_TYPE.none
? newDoc.DocumentType
: defaultDocumentTypeForAuditLog;
OldDocumentType = oldDoc != null && oldDoc.DocumentType != SrmDocument.DOCUMENT_TYPE.none
? oldDoc.DocumentType
: NewDocumentType;
}
public static SrmDocumentPair Create(SrmDocument oldDoc, SrmDocument newDoc,
SrmDocument.DOCUMENT_TYPE defaultDocumentTypeForLogging)
{
return new SrmDocumentPair(oldDoc, newDoc, defaultDocumentTypeForLogging);
}
public ObjectPair<object> ToObjectType()
{
return Transform(doc => (object) doc);
}
public SrmDocument OldDoc { get { return OldObject; } }
public SrmDocument NewDoc { get { return NewObject; } }
// Used for "peptide"->"molecule" translation cue in human readable logs
public SrmDocument.DOCUMENT_TYPE OldDocumentType { get; private set; } // Useful when something in document is being removed, which might cause a change from mixed to proteomic but you want to log event as "molecule" rather than "peptide"
public SrmDocument.DOCUMENT_TYPE NewDocumentType { get; private set; } // Useful when something is being added, which might cause a change from proteomic to mixed so you want to log event as "molecule" rather than "peptide"
}
public class Targets
{
private readonly SrmDocument _doc;
public Targets(SrmDocument doc)
{
_doc = doc;
}
[TrackChildren(ignoreName:true)]
public IList<DocNode> Children
{
get { return _doc.Children; }
}
}
}
| 1 | 13,313 | This is a mess. Can it be broken out as a separate function using ifs and return statements, which would also make it more debuggable. | ProteoWizard-pwiz | .cs |
@@ -1457,8 +1457,16 @@ class GenericCompositePlot(DimensionedPlot):
key_map = dict(zip([d.name for d in self.dimensions], key))
for path, item in self.layout.items():
- frame = item.map(lambda x: get_plot_frame(x, key_map, cached=cached),
- ['DynamicMap', 'HoloMap'])
+ clone = item.map(lambda x: x)
+
+ # Ensure that DynamicMaps in the cloned frame have
+ # identical callback inputs to allow memoization to work
+ for it1, it2 in zip(item.traverse(lambda x: x), clone.traverse(lambda x: x)):
+ if isinstance(it1, DynamicMap):
+ with util.disable_constant(it2.callback):
+ it2.callback.inputs = it1.callback.inputs
+ frame = clone.map(lambda x: get_plot_frame(x, key_map, cached=cached),
+ [DynamicMap, HoloMap], clone=False)
if frame is not None:
layout_frame[path] = frame
traverse_setter(self, '_force', False) | 1 | """
Public API for all plots supported by HoloViews, regardless of
plotting package or backend. Every plotting classes must be a subclass
of this Plot baseclass.
"""
import warnings
from itertools import groupby, product
from collections import Counter, defaultdict
import numpy as np
import param
from ..core import OrderedDict
from ..core import util, traversal
from ..core.element import Element, Element3D
from ..core.overlay import Overlay, CompositeOverlay
from ..core.layout import Empty, NdLayout, Layout
from ..core.options import Store, Compositor, SkipRendering
from ..core.overlay import NdOverlay
from ..core.spaces import HoloMap, DynamicMap
from ..core.util import stream_parameters, isfinite
from ..element import Table, Graph
from ..util.transform import dim
from .util import (get_dynamic_mode, initialize_unbounded, dim_axis_label,
attach_streams, traverse_setter, get_nested_streams,
compute_overlayable_zorders, get_plot_frame,
split_dmap_overlay, get_axis_padding, get_range,
get_minimum_span)
class Plot(param.Parameterized):
    """
    Base class of all Plot classes in HoloViews, designed to be
    general enough to use any plotting package or backend.
    """

    # Style options that may be supplied to the plotting call
    style_opts = []

    # Sometimes matplotlib doesn't support the common aliases.
    # Use this list to disable any invalid style options
    _disabled_opts = []

    def initialize_plot(self, ranges=None):
        """
        Initialize the matplotlib figure.
        """
        raise NotImplementedError

    def update(self, key):
        """
        Update the internal state of the Plot to represent the given
        key tuple (where integers represent frames). Returns this
        state.
        """
        return self.state

    @property
    def state(self):
        """
        The plotting state that gets updated via the update method and
        used by the renderer to generate output.
        """
        raise NotImplementedError

    def cleanup(self):
        """
        Cleans up references to the plot on the attached Stream
        subscribers.
        """
        plots = self.traverse(lambda x: x, [Plot])
        for plot in plots:
            if not isinstance(plot, (GenericCompositePlot, GenericElementPlot,
                                     GenericOverlayPlot)):
                continue
            for stream in set(plot.streams):
                # Drop subscribers whose bound method belongs to one of
                # the plots being cleaned up
                retained = []
                for priority, subscriber in stream._subscribers:
                    if util.get_method_owner(subscriber) not in plots:
                        retained.append((priority, subscriber))
                stream._subscribers = retained
        if self.comm:
            self.comm.close()

    @property
    def id(self):
        # Prefer the comm id; fall back to the id of the plotting state
        return self.comm.id if self.comm else id(self.state)

    def __len__(self):
        """
        Returns the total number of available frames.
        """
        raise NotImplementedError

    @classmethod
    def lookup_options(cls, obj, group):
        """
        Looks up the options of the given group type on the supplied
        object, filtered to what the resolved plotting class supports.
        """
        plot_class = None
        try:
            plot_class = Store.renderers[cls.backend].plotting_class(obj)
            style_opts = plot_class.style_opts
        except SkipRendering:
            style_opts = None

        node = Store.lookup_options(cls.backend, obj, group)
        if group == 'style' and style_opts is not None:
            return node.filtered(style_opts)
        elif group == 'plot' and plot_class:
            return node.filtered(list(plot_class.params().keys()))
        return node
class PlotSelector(object):
    """
    Proxy that allows dynamic selection of a plotting class based on a
    function of the plotted object. Behaves like a Plot class and
    presents the same parameterized interface.
    """
    _disabled_opts = []

    def __init__(self, selector, plot_classes, allow_mismatch=False):
        """
        The selector function accepts a component instance and returns
        the appropriate key to index plot_classes dictionary.
        """
        self.selector = selector
        self.plot_classes = OrderedDict(plot_classes)
        interface = self._define_interface(self.plot_classes.values(), allow_mismatch)
        self.style_opts, self.plot_options = interface

    def _define_interface(self, plots, allow_mismatch):
        # Gather the visible (non-negative precedence) parameters of
        # each candidate plotting class
        parameters = []
        for plot in plots:
            visible = {k: v.precedence for k, v in plot.params().items()
                       if (v.precedence is None) or (v.precedence >= 0)}
            parameters.append(visible)
        param_sets = [set(params) for params in parameters]
        if not allow_mismatch and any(pset != param_sets[0] for pset in param_sets):
            raise Exception("All selectable plot classes must have identical plot options.")
        styles = [plot.style_opts for plot in plots]
        if not allow_mismatch and any(style != styles[0] for style in styles):
            raise Exception("All selectable plot classes must have identical style options.")
        plot_params = {p: v for params in parameters for p, v in params.items()}
        flattened_styles = [s for style in styles for s in style]
        return flattened_styles, plot_params

    def __call__(self, obj, **kwargs):
        plot_class = self.get_plot_class(obj)
        return plot_class(obj, **kwargs)

    def get_plot_class(self, obj):
        # Resolve the plotting class registered under the key the
        # selector computes for this object
        key = self.selector(obj)
        if key not in self.plot_classes:
            msg = "Key %s returned by selector not in set: %s"
            raise Exception(msg % (key, ', '.join(self.plot_classes.keys())))
        return self.plot_classes[key]

    def __setattr__(self, label, value):
        try:
            return super(PlotSelector, self).__setattr__(label, value)
        except:
            raise Exception("Please set class parameters directly on classes %s"
                            % ', '.join(str(cls) for cls in self.__dict__['plot_classes'].values()))

    def params(self):
        return self.plot_options
class DimensionedPlot(Plot):
    """
    DimensionedPlot implements a number of useful methods
    to compute dimension ranges and titles containing the
    dimension values.
    """
    # Font sizes for the text elements of the plot: either a single
    # size applied everywhere or a dict keyed by element name (see
    # _fontsize_keys for the valid keys).
    fontsize = param.Parameter(default=None, allow_None=True, doc="""
       Specifies various font sizes of the displayed text.
       Finer control is available by supplying a dictionary where any
       unmentioned keys revert to the default sizes, e.g:
       {'ticks':20, 'title':15,
       'ylabel':5, 'xlabel':5, 'zlabel':5,
       'legend':8, 'legend_title':13}
       You can set the font size of 'zlabel', 'ylabel' and 'xlabel'
       together using the 'labels' key.""")
    # Allowed fontsize keys; anything else supplied by the user is
    # popped with a warning (see _fontsize)
    _fontsize_keys = ['xlabel','ylabel', 'zlabel', 'labels',
                      'xticks', 'yticks', 'zticks', 'ticks',
                      'minor_xticks', 'minor_yticks', 'minor_ticks',
                      'title', 'legend', 'legend_title',
                     ]
    show_title = param.Boolean(default=True, doc="""
        Whether to display the plot title.""")
    title_format = param.String(default="{label} {group}\n{dimensions}", doc="""
        The formatting string for the title of this plot, allows defining
        a label group separator and dimension labels.""")
    normalize = param.Boolean(default=True, doc="""
        Whether to compute ranges across all Elements at this level
        of plotting. Allows selecting normalization at different levels
        for nested data containers.""")
    projection = param.Parameter(default=None, doc="""
        Allows supplying a custom projection to transform the axis
        coordinates during display. Example projections include '3d'
        and 'polar' projections supported by some backends. Depending
        on the backend custom, projection objects may be supplied.""")
    def __init__(self, keys=None, dimensions=None, layout_dimensions=None,
                 uniform=True, subplot=False, adjoined=None, layout_num=0,
                 style=None, subplots=None, dynamic=False, renderer=None,
                 comm=None, **params):
        """
        Stores the plot hierarchy and key-dimension state shared by all
        dimensioned plots. Keyword arguments not declared as Parameters
        on this class are silently dropped.
        """
        self.subplots = subplots              # Mapping of subplot keys to Plot instances (or None)
        self.adjoined = adjoined              # Plot this plot is adjoined to, if any
        self.dimensions = dimensions          # Key dimensions this plot is indexed by
        self.layout_num = layout_num          # Position of this plot within an enclosing layout
        self.layout_dimensions = layout_dimensions
        self.subplot = subplot                # Whether this plot is nested inside another plot
        self.keys = keys
        self.uniform = uniform
        self.dynamic = dynamic                # Whether the plotted object is dynamic (DynamicMap)
        self.drawn = False                    # Set to True once the plot has been rendered
        self.handles = {}                     # Backend-specific plotting handles
        self.group = None
        self.label = None
        self.current_frame = None
        self.current_key = None
        self.ranges = {}
        self.renderer = renderer if renderer else Store.renderers[self.backend].instance()
        self.comm = comm
        self._force = False
        self._updated = False # Whether the plot should be marked as updated
        # Filter out keyword arguments that are not declared Parameters
        params = {k: v for k, v in params.items()
                  if k in self.params()}
        super(DimensionedPlot, self).__init__(**params)
def __getitem__(self, frame):
"""
Get the state of the Plot for a given frame number.
"""
if isinstance(frame, int) and frame > len(self):
self.warning("Showing last frame available: %d" % len(self))
if not self.drawn: self.handles['fig'] = self.initialize_plot()
if not isinstance(frame, tuple):
frame = self.keys[frame]
self.update_frame(frame)
return self.state
def _get_frame(self, key):
"""
Required on each MPLPlot type to get the data corresponding
just to the current frame out from the object.
"""
pass
def matches(self, spec):
"""
Matches a specification against the current Plot.
"""
if callable(spec) and not isinstance(spec, type): return spec(self)
elif isinstance(spec, type): return isinstance(self, spec)
else:
raise ValueError("Matching specs have to be either a type or a callable.")
def traverse(self, fn=None, specs=None, full_breadth=True):
"""
Traverses any nested DimensionedPlot returning a list
of all plots that match the specs. The specs should
be supplied as a list of either Plot types or callables,
which should return a boolean given the plot class.
"""
accumulator = []
matches = specs is None
if not matches:
for spec in specs:
matches = self.matches(spec)
if matches: break
if matches:
accumulator.append(fn(self) if fn else self)
# Assumes composite objects are iterables
if hasattr(self, 'subplots') and self.subplots:
for el in self.subplots.values():
if el is None:
continue
accumulator += el.traverse(fn, specs, full_breadth)
if not full_breadth: break
return accumulator
    def _frame_title(self, key, group_size=2, separator='\n'):
        """
        Returns the formatted dimension group strings
        for a particular frame.
        """
        if self.layout_dimensions is not None:
            dimensions, key = zip(*self.layout_dimensions.items())
        # Precedence: (not dynamic and (non-uniform or single frame)) or subplot
        # -- static single-frame plots and subplots display no frame title
        elif not self.dynamic and (not self.uniform or len(self) == 1) or self.subplot:
            return ''
        else:
            key = key if isinstance(key, tuple) else (key,)
            dimensions = self.dimensions
        # Format each dimension/value pair, then group them into lines
        # of group_size entries joined by the separator
        dimension_labels = [dim.pprint_value_string(k) for dim, k in
                            zip(dimensions, key)]
        groups = [', '.join(dimension_labels[i*group_size:(i+1)*group_size])
                  for i in range(len(dimension_labels))]
        return util.bytes_to_unicode(separator.join(g for g in groups if g))
def _fontsize(self, key, label='fontsize', common=True):
if not self.fontsize: return {}
if not isinstance(self.fontsize, dict):
return {label:self.fontsize} if common else {}
unknown_keys = set(self.fontsize.keys()) - set(self._fontsize_keys)
if unknown_keys:
msg = "Popping unknown keys %r from fontsize dictionary.\nValid keys: %r"
self.warning(msg % (list(unknown_keys), self._fontsize_keys))
for key in unknown_keys: self.fontsize.pop(key, None)
if key in self.fontsize:
return {label:self.fontsize[key]}
elif key in ['zlabel', 'ylabel', 'xlabel'] and 'labels' in self.fontsize:
return {label:self.fontsize['labels']}
elif key in ['xticks', 'yticks', 'zticks'] and 'ticks' in self.fontsize:
return {label:self.fontsize['ticks']}
elif key in ['minor_xticks', 'minor_yticks'] and 'minor_ticks' in self.fontsize:
return {label:self.fontsize['minor_ticks']}
else:
return {}
def compute_ranges(self, obj, key, ranges):
"""
Given an object, a specific key, and the normalization options,
this method will find the specified normalization options on
the appropriate OptionTree, group the elements according to
the selected normalization option (i.e. either per frame or
over the whole animation) and finally compute the dimension
ranges in each group. The new set of ranges is returned.
"""
all_table = all(isinstance(el, Table) for el in obj.traverse(lambda x: x, [Element]))
if obj is None or not self.normalize or all_table:
return OrderedDict()
# Get inherited ranges
ranges = self.ranges if ranges is None else dict(ranges)
# Get element identifiers from current object and resolve
# with selected normalization options
norm_opts = self._get_norm_opts(obj)
# Traverse displayed object if normalization applies
# at this level, and ranges for the group have not
# been supplied from a composite plot
return_fn = lambda x: x if isinstance(x, Element) else None
for group, (axiswise, framewise) in norm_opts.items():
elements = []
# Skip if ranges are cached or already computed by a
# higher-level container object.
framewise = framewise or self.dynamic or len(elements) == 1
if group in ranges and (not framewise or ranges is not self.ranges):
continue
elif not framewise: # Traverse to get all elements
elements = obj.traverse(return_fn, [group])
elif key is not None: # Traverse to get elements for each frame
frame = self._get_frame(key)
elements = [] if frame is None else frame.traverse(return_fn, [group])
# Only compute ranges if not axiswise on a composite plot
# or not framewise on a Overlay or ElementPlot
if (not (axiswise and not isinstance(obj, HoloMap)) or
(not framewise and isinstance(obj, HoloMap))):
self._compute_group_range(group, elements, ranges)
self.ranges.update(ranges)
return ranges
    def _get_norm_opts(self, obj):
        """
        Gets the normalization options for a LabelledData object by
        traversing the object to find elements and their ids.
        The id is then used to select the appropriate OptionsTree,
        accumulating the normalization options into a dictionary.
        Returns a dictionary of normalization options for each
        element in the tree.
        """
        norm_opts = {}
        # Get all elements' type.group.label specs and ids
        type_val_fn = lambda x: (x.id, (type(x).__name__, util.group_sanitizer(x.group, escape=False),
                                        util.label_sanitizer(x.label, escape=False))) \
            if isinstance(x, Element) else None
        element_specs = {(idspec[0], idspec[1]) for idspec in obj.traverse(type_val_fn)
                         if idspec is not None}
        # Group elements specs by ID and override normalization
        # options sequentially; ids of None sort first (mapped to -1)
        key_fn = lambda x: -1 if x[0] is None else x[0]
        id_groups = groupby(sorted(element_specs, key=key_fn), key_fn)
        for gid, element_spec_group in id_groups:
            gid = None if gid == -1 else gid
            group_specs = [el for _, el in element_spec_group]
            backend = self.renderer.backend
            # Custom options for this id when present, otherwise the
            # backend's default options tree
            optstree = Store.custom_options(
                backend=backend).get(gid, Store.options(backend=backend))
            # Get the normalization options for the current id
            # and match against customizable elements
            for opts in optstree:
                # Path prefixes of length 1-3 (type, group, label) may match
                path = tuple(opts.path.split('.')[1:])
                applies = any(path == spec[:i] for spec in group_specs
                              for i in range(1, 4))
                if applies and 'norm' in opts.groups:
                    nopts = opts['norm'].options
                    if 'axiswise' in nopts or 'framewise' in nopts:
                        norm_opts.update({path: (nopts.get('axiswise', False),
                                                 nopts.get('framewise', False))})
        element_specs = [spec for _, spec in element_specs]
        # Specs without any explicit normalization options default to
        # non-axiswise, non-framewise normalization
        norm_opts.update({spec: (False, False) for spec in element_specs
                          if not any(spec[:i] in norm_opts.keys() for i in range(1, 4))})
        return norm_opts
    @classmethod
    def _compute_group_range(cls, group, elements, ranges):
        """
        Accumulates data/hard/soft ranges (and categorical factors) for
        every dimension across all elements of a normalization group,
        storing the combined result under ranges[group].
        """
        # Iterate over all elements in a normalization group
        # and accumulate their ranges into the supplied dictionary.
        elements = [el for el in elements if el is not None]
        group_ranges = OrderedDict()
        for el in elements:
            if isinstance(el, (Empty, Table)): continue
            opts = cls.lookup_options(el, 'style')
            plot_opts = cls.lookup_options(el, 'plot')
            # Compute normalization for color dim transforms
            for k, v in dict(opts.kwargs, **plot_opts.kwargs).items():
                # Only dim transforms on color-like options (or 'magnitude')
                # participate in range computation
                if not isinstance(v, dim) or ('color' not in k and k != 'magnitude'):
                    continue
                if isinstance(v, dim) and v.applies(el):
                    dim_name = repr(v)
                    values = v.apply(el, expanded=False, all_values=True)
                    factors = None
                    if values.dtype.kind == 'M':
                        # Datetime values: nanmin/nanmax not applicable
                        drange = values.min(), values.max()
                    elif util.isscalar(values):
                        drange = values, values
                    elif len(values) == 0:
                        drange = np.NaN, np.NaN
                    else:
                        try:
                            with warnings.catch_warnings():
                                warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered')
                                drange = (np.nanmin(values), np.nanmax(values))
                        except:
                            # Non-numeric values: treat as categorical factors
                            factors = util.unique_array(values)
                    if dim_name not in group_ranges:
                        group_ranges[dim_name] = {'data': [], 'hard': [], 'soft': []}
                    if factors is not None:
                        if 'factors' not in group_ranges[dim_name]:
                            group_ranges[dim_name]['factors'] = []
                        group_ranges[dim_name]['factors'].append(factors)
                    else:
                        group_ranges[dim_name]['data'].append(drange)
            # Compute dimension normalization
            for el_dim in el.dimensions('ranges'):
                if isinstance(el, Graph) and el_dim in el.kdims[:2]:
                    # Graph start/end ranges come from the node index column
                    data_range = el.nodes.range(2, dimension_range=False)
                else:
                    data_range = el.range(el_dim, dimension_range=False)
                if el_dim.name not in group_ranges:
                    group_ranges[el_dim.name] = {'data': [], 'hard': [], 'soft': []}
                group_ranges[el_dim.name]['data'].append(data_range)
                group_ranges[el_dim.name]['hard'].append(el_dim.range)
                group_ranges[el_dim.name]['soft'].append(el_dim.soft_range)
                if any(isinstance(r, util.basestring) for r in data_range):
                    # String-typed range implies a categorical dimension
                    if 'factors' not in group_ranges[el_dim.name]:
                        group_ranges[el_dim.name]['factors'] = []
                    if el_dim.values not in ([], None):
                        values = el_dim.values
                    elif el_dim in el:
                        if isinstance(el, Graph) and el_dim in el.kdims[:2]:
                            # Graph start/end normalization should include all node indices
                            values = el.nodes.dimension_values(2, expanded=False)
                        else:
                            values = el.dimension_values(el_dim, expanded=False)
                    elif isinstance(el, Graph) and el_dim in el.nodes:
                        values = el.nodes.dimension_values(el_dim, expanded=False)
                    # NOTE(review): if none of the branches above assign
                    # `values`, the call below reuses a stale or unbound
                    # `values` from an earlier iteration -- confirm.
                    factors = util.unique_array(values)
                    group_ranges[el_dim.name]['factors'].append(factors)
        # Reduce the accumulated per-element lists into a single range
        # entry per dimension
        dim_ranges = []
        for gdim, values in group_ranges.items():
            hard_range = util.max_range(values['hard'], combined=False)
            soft_range = util.max_range(values['soft'])
            data_range = util.max_range(values['data'])
            combined = util.dimension_range(data_range[0], data_range[1],
                                            hard_range, soft_range)
            dranges = {'data': data_range, 'hard': hard_range,
                       'soft': soft_range, 'combined': combined}
            if 'factors' in values:
                dranges['factors'] = util.unique_array([
                    v for fctrs in values['factors'] for v in fctrs])
            dim_ranges.append((gdim, dranges))
        ranges[group] = OrderedDict(dim_ranges)
    @classmethod
    def _traverse_options(cls, obj, opt_type, opts, specs=None, keyfn=None, defaults=True):
        """
        Traverses the supplied object getting all options in opts for
        the specified opt_type and specs. Also takes into account the
        plotting class defaults for plot options. If a keyfn is
        supplied the returned options will be grouped by the returned
        keys.
        """
        def lookup(x):
            """
            Looks up options for object, including plot defaults.
            keyfn determines returned key otherwise None key is used.
            """
            options = cls.lookup_options(x, opt_type)
            selected = {o: options.options[o]
                        for o in opts if o in options.options}
            if opt_type == 'plot' and defaults:
                # Fall back to the class-level defaults of the registered
                # plotting class for any options not explicitly set
                plot = Store.registry[cls.backend].get(type(x))
                selected['defaults'] = {o: getattr(plot, o) for o in opts
                                        if o not in selected and hasattr(plot, o)}
            key = keyfn(x) if keyfn else None
            return (key, selected)
        # Traverse object and accumulate options by key
        # NOTE(review): the loops below rebind the `opts` and `defaults`
        # parameter names; harmless here since neither is used again,
        # but worth confirming before any refactor.
        traversed = obj.traverse(lookup, specs)
        options = defaultdict(lambda: defaultdict(list))
        default_opts = defaultdict(lambda: defaultdict(list))
        for key, opts in traversed:
            defaults = opts.pop('defaults', {})
            for opt, v in opts.items():
                options[key][opt].append(v)
            for opt, v in defaults.items():
                default_opts[key][opt].append(v)
        # Merge defaults into dictionary if not explicitly specified
        for key, opts in default_opts.items():
            for opt, v in opts.items():
                if opt not in options[key]:
                    options[key][opt] = v
        # Without a keyfn all results were accumulated under the None key
        return options if keyfn else options[None]
    def _get_projection(cls, obj):
        """
        Uses traversal to find the appropriate projection
        for a nested object. Respects projections set on
        Overlays before considering Element based settings,
        before finally looking up the default projection on
        the plot type. If more than one non-None projection
        type is found an exception is raised.
        """
        # NOTE(review): declared with a `cls` first argument but not
        # decorated with @classmethod -- callers evidently supply the
        # class/instance explicitly; confirm before adding the decorator.
        isoverlay = lambda x: isinstance(x, CompositeOverlay)
        # Any 3D element forces a 3d projection
        element3d = obj.traverse(lambda x: x, [Element3D])
        if element3d:
            return '3d'
        opts = cls._traverse_options(obj, 'plot', ['projection'],
                                     [CompositeOverlay, Element],
                                     keyfn=isoverlay)
        # Projections set on Overlays take precedence over Element settings
        from_overlay = not all(p is None for p in opts[True]['projection'])
        projections = opts[from_overlay]['projection']
        custom_projs = [p for p in projections if p is not None]
        if len(set(custom_projs)) > 1:
            raise Exception("An axis may only be assigned one projection type")
        return custom_projs[0] if custom_projs else None
def update(self, key):
if len(self) == 1 and ((key == 0) or (key == self.keys[0])) and not self.drawn:
return self.initialize_plot()
item = self.__getitem__(key)
self.traverse(lambda x: setattr(x, '_updated', True))
return item
    def refresh(self, **kwargs):
        """
        Refreshes the plot by rerendering it and then pushing
        the updated data if the plot has an associated Comm.
        """
        traverse_setter(self, '_force', True)
        key = self.current_key if self.current_key else self.keys[0]
        # Streams that override key dimensions supply their own values,
        # so the matching entries in the key are cleared (set to None)
        dim_streams = [stream for stream in self.streams
                       if any(c in self.dimensions for c in stream.contents)]
        stream_params = stream_parameters(dim_streams)
        key = tuple(None if d in stream_params else k
                    for d, k in zip(self.dimensions, key))
        stream_key = util.wrap_tuple_streams(key, self.dimensions, self.streams)
        # Update if not top-level, batched or an ElementPlot
        if not self.top_level or isinstance(self, GenericElementPlot):
            self.update(stream_key)
        # Only the top-level plot pushes over the comm
        if self.comm is not None and self.top_level:
            self.push()
def push(self):
"""
Pushes updated plot data via the Comm.
"""
if self.comm is None:
raise Exception('Renderer does not have a comm.')
diff = self.renderer.diff(self)
self.comm.send(diff)
def init_comm(self):
"""
Initializes comm and attaches streams.
"""
if self.comm:
return self.comm
comm = None
if self.dynamic or self.renderer.widget_mode == 'live':
comm = self.renderer.comm_manager.get_server_comm()
return comm
    def __len__(self):
        """
        Returns the total number of available frames.
        """
        # One frame per key this plot can be indexed by
        return len(self.keys)
class GenericElementPlot(DimensionedPlot):
"""
Plotting baseclass to render contents of an Element. Implements
methods to get the correct frame given a HoloMap, axis labels and
extents and titles.
"""
apply_ranges = param.Boolean(default=True, doc="""
Whether to compute the plot bounds from the data itself.""")
apply_extents = param.Boolean(default=True, doc="""
Whether to apply extent overrides on the Elements""")
bgcolor = param.ClassSelector(class_=(str, tuple), default=None, doc="""
If set bgcolor overrides the background color of the axis.""")
default_span = param.ClassSelector(default=2.0, class_=(int, float, tuple), doc="""
Defines the span of an axis if the axis range is zero, i.e. if
the lower and upper end of an axis are equal or no range is
defined at all. For example if there is a single datapoint at
0 a default_span of 2.0 will result in axis ranges spanning
from -1 to 1.""")
invert_axes = param.Boolean(default=False, doc="""
Whether to invert the x- and y-axis""")
invert_xaxis = param.Boolean(default=False, doc="""
Whether to invert the plot x-axis.""")
invert_yaxis = param.Boolean(default=False, doc="""
Whether to invert the plot y-axis.""")
logx = param.Boolean(default=False, doc="""
Whether the x-axis of the plot will be a log axis.""")
logy = param.Boolean(default=False, doc="""
Whether the y-axis of the plot will be a log axis.""")
padding = param.ClassSelector(default=0, class_=(int, float, tuple), doc="""
Fraction by which to increase auto-ranged extents to make
datapoints more visible around borders.
To compute padding, the axis whose screen size is largest is
chosen, and the range of that axis is increased by the
specified fraction along each axis. Other axes are then
padded ensuring that the amount of screen space devoted to
padding is equal for all axes. If specified as a tuple, the
int or float values in the tuple will be used for padding in
each axis, in order (x,y or x,y,z).
For example, for padding=0.2 on a 800x800-pixel plot, an x-axis
with the range [0,10] will be padded by 20% to be [-1,11], while
a y-axis with a range [0,1000] will be padded to be [-100,1100],
which should make the padding be approximately the same number of
pixels. But if the same plot is changed to have a height of only
200, the y-range will then be [-400,1400] so that the y-axis
padding will still match that of the x-axis.
It is also possible to declare non-equal padding value for the
lower and upper bound of an axis by supplying nested tuples,
e.g. padding=(0.1, (0, 0.1)) will pad the x-axis lower and
upper bound as well as the y-axis upper bound by a fraction of
0.1 while the y-axis lower bound is not padded at all.""")
show_legend = param.Boolean(default=True, doc="""
Whether to show legend for the plot.""")
show_grid = param.Boolean(default=False, doc="""
Whether to show a Cartesian grid on the plot.""")
xaxis = param.ObjectSelector(default='bottom',
objects=['top', 'bottom', 'bare', 'top-bare',
'bottom-bare', None, True, False], doc="""
Whether and where to display the xaxis.
The "bare" options allow suppressing all axis labels, including ticks and xlabel.
Valid options are 'top', 'bottom', 'bare', 'top-bare' and 'bottom-bare'.""")
yaxis = param.ObjectSelector(default='left',
objects=['left', 'right', 'bare', 'left-bare',
'right-bare', None, True, False], doc="""
Whether and where to display the yaxis.
The "bare" options allow suppressing all axis labels, including ticks and ylabel.
Valid options are 'left', 'right', 'bare', 'left-bare' and 'right-bare'.""")
xlabel = param.String(default=None, doc="""
An explicit override of the x-axis label, if set takes precedence
over the dimension label.""")
ylabel = param.String(default=None, doc="""
An explicit override of the y-axis label, if set takes precedence
over the dimension label.""")
xlim = param.NumericTuple(default=(np.nan, np.nan), length=2, doc="""
User-specified x-axis range limits for the plot, as a tuple (low,high).
If specified, takes precedence over data and dimension ranges.""")
ylim = param.NumericTuple(default=(np.nan, np.nan), length=2, doc="""
User-specified x-axis range limits for the plot, as a tuple (low,high).
If specified, takes precedence over data and dimension ranges.""")
zlim = param.NumericTuple(default=(np.nan, np.nan), length=2, doc="""
User-specified z-axis range limits for the plot, as a tuple (low,high).
If specified, takes precedence over data and dimension ranges.""")
xrotation = param.Integer(default=None, bounds=(0, 360), doc="""
Rotation angle of the xticks.""")
yrotation = param.Integer(default=None, bounds=(0, 360), doc="""
Rotation angle of the yticks.""")
xticks = param.Parameter(default=None, doc="""
Ticks along x-axis specified as an integer, explicit list of
tick locations, or bokeh Ticker object. If set to None default
bokeh ticking behavior is applied.""")
yticks = param.Parameter(default=None, doc="""
Ticks along y-axis specified as an integer, explicit list of
tick locations, or bokeh Ticker object. If set to None
default bokeh ticking behavior is applied.""")
# A dictionary mapping of the plot methods used to draw the
# glyphs corresponding to the ElementPlot, can support two
# keyword arguments a 'single' implementation to draw an individual
# plot and a 'batched' method to draw multiple Elements at once
_plot_methods = {}
# Declares the options that are propagated from sub-elements of the
# plot, mostly useful for inheriting options from individual
# Elements on an OverlayPlot. Enabled by default in v1.7.
_propagate_options = []
v17_option_propagation = True
def __init__(self, element, keys=None, ranges=None, dimensions=None,
             batched=False, overlaid=0, cyclic_index=0, zorder=0, style=None,
             overlay_dims={}, stream_sources=[], streams=None, **params):
    # NOTE(review): ``overlay_dims={}`` and ``stream_sources=[]`` are mutable
    # defaults; they are only assigned here, but would alias across instances
    # if ever mutated in place.
    self.zorder = zorder
    self.cyclic_index = cyclic_index
    self.overlaid = overlaid
    self.batched = batched
    self.overlay_dims = overlay_dims
    # Wrap a bare Element in a single-frame HoloMap so all code paths can
    # treat self.hmap uniformly.
    if not isinstance(element, (HoloMap, DynamicMap)):
        self.hmap = HoloMap(initial_items=(0, element),
                            kdims=['Frame'], id=element.id)
    else:
        self.hmap = element
    # Overlaid subplots inherit stream sources from the parent overlay.
    if overlaid:
        self.stream_sources = stream_sources
    else:
        self.stream_sources = compute_overlayable_zorders(self.hmap)
    plot_element = self.hmap.last
    if self.batched and not isinstance(self, GenericOverlayPlot):
        plot_element = plot_element.last
    dynamic = isinstance(element, DynamicMap) and not element.unbounded
    # Top-level plots derive dimensions/keys from the map itself.
    self.top_level = keys is None
    if self.top_level:
        dimensions = self.hmap.kdims
        keys = list(self.hmap.data.keys())
    self.style = self.lookup_options(plot_element, 'style') if style is None else style
    plot_opts = self.lookup_options(plot_element, 'plot').options
    # Optionally propagate plot options declared on contained Elements
    # (enabled by default since v1.7); explicit plot_opts win.
    if self.v17_option_propagation:
        inherited = self._traverse_options(plot_element, 'plot',
                                           self._propagate_options,
                                           defaults=False)
        plot_opts.update(**{k: v[0] for k, v in inherited.items()
                            if k not in plot_opts})
    super(GenericElementPlot, self).__init__(keys=keys, dimensions=dimensions,
                                             dynamic=dynamic,
                                             **dict(params, **plot_opts))
    self.streams = get_nested_streams(self.hmap) if streams is None else streams
    # Top-level plots own the comm and share it with all subplots.
    if self.top_level:
        self.comm = self.init_comm()
        self.traverse(lambda x: setattr(x, 'comm', self.comm))
    # Attach streams if not overlaid and not a batched ElementPlot
    if not (self.overlaid or (self.batched and not isinstance(self, GenericOverlayPlot))):
        attach_streams(self, self.hmap)
    # Update plot and style options for batched plots
    if self.batched:
        self.ordering = util.layer_sort(self.hmap)
        overlay_opts = self.lookup_options(self.hmap.last, 'plot').options.items()
        opts = {k: v for k, v in overlay_opts if k in self.params()}
        self.set_param(**opts)
        self.style = self.lookup_options(plot_element, 'style').max_cycles(len(self.ordering))
    else:
        self.ordering = []
def get_zorder(self, overlay, key, el):
    """
    Look up the z-order of an element inside the (Nd)Overlay,
    taking into account that elements may have been batched.
    """
    overlay_spec = util.get_overlay_spec(overlay, key, el)
    return self.ordering.index(overlay_spec)
def _updated_zorders(self, overlay):
    """
    Extend self.ordering with the specs of all layers in the supplied
    overlay and return the resulting zorder of each layer.
    """
    layer_specs = []
    for key, el in overlay.data.items():
        layer_specs.append(util.get_overlay_spec(overlay, key, el))
    self.ordering = sorted(set(self.ordering) | set(layer_specs))
    return [self.ordering.index(s) for s in layer_specs]
def _get_frame(self, key):
    """
    Returns the frame in self.hmap corresponding to the requested key,
    caching the current frame/key and (for dynamic maps) recording
    newly seen keys.
    """
    # Overlaid dynamic subplots reuse the frame pushed by the parent.
    if isinstance(self.hmap, DynamicMap) and self.overlaid and self.current_frame:
        self.current_key = key
        return self.current_frame
    elif key == self.current_key and not self._force:
        # Cache hit: key unchanged and no forced refresh requested.
        return self.current_frame
    # cached=True only on the very first lookup.
    cached = self.current_key is None
    key_map = dict(zip([d.name for d in self.dimensions], key))
    frame = get_plot_frame(self.hmap, key_map, cached)
    # Clear the _force flag on this plot and all subplots.
    traverse_setter(self, '_force', False)
    if not key in self.keys and self.dynamic:
        self.keys.append(key)
    self.current_frame = frame
    self.current_key = key
    return frame
def _execute_hooks(self, element):
    """
    Runs the user-supplied plot hooks on this plot and element.

    ``hooks`` takes precedence over the deprecated ``finalize_hooks``;
    a warning is emitted when both are supplied. Exceptions raised by a
    hook are caught and reported as warnings so a broken hook cannot
    abort rendering.
    """
    if self.hooks and self.finalize_hooks:
        self.warning("Supply either hooks or finalize_hooks not both, "
                     "using hooks and ignoring finalize_hooks.")
    for hook in (self.hooks or self.finalize_hooks):
        try:
            hook(self, element)
        except Exception as e:
            self.warning("Plotting hook %r could not be applied:\n\n %s" % (hook, e))
def get_aspect(self, xspan, yspan):
    """
    Should define the aspect ratio of the plot.

    NOTE(review): this base implementation implicitly returns None;
    ``get_padding`` compares the result numerically, so backends are
    expected to override this with a numeric implementation — confirm.
    """
def get_padding(self, extents):
    """
    Computes padding along the axes taking into account the plot aspect.
    """
    (x0, y0, z0, x1, y1, z1) = extents
    # Overlaid subplots must not pad; the parent overlay applies padding.
    padding = 0 if self.overlaid else self.padding
    xpad, ypad, zpad = get_axis_padding(padding)
    if not self.overlaid and not self.batched:
        # Spans are only computable when both bounds are numeric.
        xspan = x1-x0 if util.is_number(x0) and util.is_number(x1) else None
        yspan = y1-y0 if util.is_number(y0) and util.is_number(y1) else None
        aspect = self.get_aspect(xspan, yspan)
        # NOTE(review): assumes get_aspect returns a number; the base
        # implementation returns None — confirm subclasses override it.
        # Scale padding on the longer axis so padding looks visually
        # uniform despite the aspect ratio.
        if aspect > 1:
            xpad = tuple(xp/aspect for xp in xpad) if isinstance(xpad, tuple) else xpad/aspect
        else:
            ypad = tuple(yp*aspect for yp in ypad) if isinstance(ypad, tuple) else ypad*aspect
    return xpad, ypad, zpad
def _get_range_extents(self, element, ranges, range_type, xdim, ydim, zdim):
    """
    Computes the extents along the x, y (and, for 3d projections, z)
    axes for the requested range_type ('data', 'soft', 'hard' or
    'combined'), falling back to the element's leading dimensions when
    no explicit dimensions are supplied.
    """
    dims = element.dimensions()
    ndims = len(dims)
    xdim = xdim or (dims[0] if ndims else None)
    ydim = ydim or (dims[1] if ndims > 1 else None)
    if self.projection == '3d':
        zdim = zdim or (dims[2] if ndims > 2 else None)
    else:
        zdim = None

    # Data range plus the dimension's declared soft- and hard-ranges.
    (x0, x1), xsrange, xhrange = get_range(element, ranges, xdim)
    (y0, y1), ysrange, yhrange = get_range(element, ranges, ydim)
    (z0, z1), zsrange, zhrange = get_range(element, ranges, zdim)

    if not self.overlaid and not self.batched:
        xspan, yspan, zspan = (v/2. for v in get_axis_padding(self.default_span))
        x0, x1 = get_minimum_span(x0, x1, xspan)
        y0, y1 = get_minimum_span(y0, y1, yspan)
        z0, z1 = get_minimum_span(z0, z1, zspan)
    xpad, ypad, zpad = self.get_padding((x0, y0, z0, x1, y1, z1))

    if range_type == 'soft':
        x0, x1 = xsrange
    elif range_type == 'hard':
        x0, x1 = xhrange
    elif xdim == 'categorical':
        x0, x1 = '', ''
    elif range_type == 'combined':
        x0, x1 = util.dimension_range(x0, x1, xhrange, xsrange, xpad, self.logx)

    if range_type == 'soft':
        y0, y1 = ysrange
    elif range_type == 'hard':
        y0, y1 = yhrange
    # Categorical/missing y-dimensions are checked BEFORE the combined
    # range is computed, mirroring the x-axis branch order above.
    # Previously the 'combined' branch shadowed these two cases.
    elif ydim == 'categorical':
        y0, y1 = '', ''
    elif ydim is None:
        y0, y1 = np.NaN, np.NaN
    elif range_type == 'combined':
        y0, y1 = util.dimension_range(y0, y1, yhrange, ysrange, ypad, self.logy)

    if self.projection == '3d':
        if range_type == 'soft':
            z0, z1 = zsrange
        elif range_type == 'hard':
            # 'hard' (not 'data') selects the hard range, consistent with
            # the x- and y-axis handling; for 'data' the data range already
            # held in z0/z1 is returned unchanged.
            z0, z1 = zhrange
        elif zdim == 'categorical':
            z0, z1 = '', ''
        elif zdim is None:
            z0, z1 = np.NaN, np.NaN
        elif range_type == 'combined':
            z0, z1 = util.dimension_range(z0, z1, zhrange, zsrange, zpad, self.logz)
        return (x0, y0, z0, x1, y1, z1)
    return (x0, y0, x1, y1)
def get_extents(self, element, ranges, range_type='combined', xdim=None, ydim=None, zdim=None):
    """
    Gets the extents for the axes from the current Element. The globally
    computed ranges can optionally override the extents.

    The extents are computed by combining the data ranges, extents
    and dimension ranges. Each of these can be obtained individually
    by setting the range_type to one of:

    * 'data': Just the data ranges
    * 'extents': Element.extents
    * 'soft': Dimension.soft_range values
    * 'hard': Dimension.range values

    To obtain the combined range, which includes range padding the
    default may be used:

    * 'combined': All the range types combined and padding applied

    This allows Overlay plots to obtain each range and combine them
    appropriately for all the objects in the overlay.
    """
    # 3d projections carry six extent values (x0, y0, z0, x1, y1, z1).
    num = 6 if self.projection == '3d' else 4
    if self.apply_extents and range_type in ('combined', 'extents'):
        norm_opts = self.lookup_options(element, 'norm').options
        if norm_opts.get('framewise', False) or self.dynamic:
            # Framewise/dynamic plots use only the current frame's extents.
            extents = element.extents
        else:
            # Otherwise combine extents across every frame in the map.
            extent_list = self.hmap.traverse(lambda x: x.extents, [Element])
            extents = util.max_extents(extent_list, self.projection == '3d')
    else:
        extents = (np.NaN,) * num

    if range_type == 'extents':
        return extents

    if self.apply_ranges:
        range_extents = self._get_range_extents(element, ranges, range_type, xdim, ydim, zdim)
    else:
        range_extents = (np.NaN,) * num

    if getattr(self, 'shared_axes', False) and self.subplot:
        # Shared axes: take the maximum of range extents and Element.extents.
        combined = util.max_extents([range_extents, extents], self.projection == '3d')
    else:
        # Otherwise explicit Element.extents win wherever they are finite.
        max_extent = []
        for l1, l2 in zip(range_extents, extents):
            if isfinite(l2):
                max_extent.append(l2)
            else:
                max_extent.append(l1)
        combined = tuple(max_extent)

    if self.projection == '3d':
        x0, y0, z0, x1, y1, z1 = combined
    else:
        x0, y0, x1, y1 = combined

    # User-specified xlim/ylim/zlim plot options take final precedence.
    x0, x1 = util.dimension_range(x0, x1, self.xlim, (None, None))
    y0, y1 = util.dimension_range(y0, y1, self.ylim, (None, None))
    if self.projection == '3d':
        z0, z1 = util.dimension_range(z0, z1, self.zlim, (None, None))
        return (x0, y0, z0, x1, y1, z1)
    return (x0, y0, x1, y1)
def _get_axis_labels(self, dimensions, xlabel=None, ylabel=None, zlabel=None):
    """
    Resolves the axis labels, giving the xlabel/ylabel/zlabel plot
    options precedence over labels derived from the dimensions.
    """
    if self.xlabel is not None:
        xlabel = self.xlabel
    elif dimensions and xlabel is None:
        xdim = dimensions[0]
        xlabel = dim_axis_label(xdim) if xdim else ''

    if self.ylabel is not None:
        ylabel = self.ylabel
    elif ylabel is None and len(dimensions) >= 2:
        ydim = dimensions[1]
        ylabel = dim_axis_label(ydim) if ydim else ''

    if getattr(self, 'zlabel', None) is not None:
        zlabel = self.zlabel
    elif zlabel is None and self.projection == '3d' and len(dimensions) >= 3:
        zdim = dimensions[2]
        zlabel = dim_axis_label(zdim) if zdim else ''

    return xlabel, ylabel, zlabel
def _format_title(self, key, dimensions=True, separator='\n'):
    """
    Formats the plot title for the frame at the given key, filling the
    title_format template with the frame's label, group, type and
    dimension values.
    """
    frame = self._get_frame(key)
    if frame is None: return None
    type_name = type(frame).__name__
    # Suppress the group when it merely repeats the type name.
    group = frame.group if frame.group != type_name else ''
    label = frame.label
    if self.layout_dimensions:
        # Inside a layout only the dimension values are shown.
        dim_title = self._frame_title(key, separator=separator)
        title = dim_title
    else:
        if dimensions:
            dim_title = self._frame_title(key, separator=separator)
        else:
            dim_title = ''
        title_format = util.bytes_to_unicode(self.title_format)
        title = title_format.format(label=util.bytes_to_unicode(label),
                                    group=util.bytes_to_unicode(group),
                                    type=type_name,
                                    dimensions=dim_title)
    return title.strip(' \n')
def update_frame(self, key, ranges=None):
    """
    Set the plot(s) to the frame corresponding to the given key.
    Backends override this stub and operate by manipulating the
    backend-specific objects held in the self._handles dictionary.

    If the key is beyond the available frames, the last available
    frame is used.
    """
class GenericOverlayPlot(GenericElementPlot):
    """
    Plotting baseclass to render (Nd)Overlay objects. It implements
    methods to handle the creation of ElementPlots, coordinating style
    groupings and zorder for all layers across a HoloMap. It also
    allows collapsing of layers via the Compositor.
    """

    batched = param.Boolean(default=True, doc="""
        Whether to plot Elements NdOverlay in a batched plotting call
        if possible. Disables legends and zorder may not be preserved.""")

    legend_limit = param.Integer(default=25, doc="""
        Number of rendered glyphs before legends are disabled.""")

    show_legend = param.Boolean(default=True, doc="""
        Whether to show legend for the plot.""")

    style_grouping = param.Integer(default=2, doc="""
        The length of the type.group.label spec that will be used to
        group Elements into style groups. A style_grouping value of
        1 will group just by type, a value of 2 will group by type and
        group, and a value of 3 will group by the full specification.""")

    # Handle names that are forwarded from this plot to its subplots
    # (populated by backend-specific subclasses).
    _passed_handles = []

    def __init__(self, overlay, ranges=None, batched=True, keys=None, group_counter=None, **params):
        if 'projection' not in params:
            params['projection'] = self._get_projection(overlay)

        super(GenericOverlayPlot, self).__init__(overlay, ranges=ranges, keys=keys,
                                                 batched=batched, **params)

        # Apply data collapse
        self.hmap = self._apply_compositor(self.hmap, ranges, self.keys)
        # Counts layers per style group; used to size style cycles.
        self.map_lengths = Counter()
        # Shared across nested overlays so cyclic indexes stay consistent.
        self.group_counter = Counter() if group_counter is None else group_counter
        self.cyclic_index_lookup = {}
        self.zoffset = 0
        self.subplots = self._create_subplots(ranges)
        self.traverse(lambda x: setattr(x, 'comm', self.comm))
        self.top_level = keys is None
        self.dynamic_subplots = []
        if self.top_level:
            self.comm = self.init_comm()
            self.traverse(lambda x: setattr(x, 'comm', self.comm))
            self.traverse(lambda x: attach_streams(self, x.hmap, 2),
                          [GenericElementPlot])

    def _apply_compositor(self, holomap, ranges=None, keys=None, dimensions=None):
        """
        Given a HoloMap compute the appropriate (mapwise or framewise)
        ranges in order to apply the Compositor collapse operations in
        display mode (data collapse should already have happened).
        """
        # Compute framewise normalization
        defaultdim = holomap.ndims == 1 and holomap.kdims[0].name != 'Frame'

        if keys and ranges and dimensions and not defaultdim:
            # Slice the supplied keys down to the dimensions of this map.
            dim_inds = [dimensions.index(d) for d in holomap.kdims]
            sliced_keys = [tuple(k[i] for i in dim_inds) for k in keys]
            frame_ranges = OrderedDict([(slckey, self.compute_ranges(holomap, key, ranges[key]))
                                        for key, slckey in zip(keys, sliced_keys) if slckey in holomap.data.keys()])
        else:
            mapwise_ranges = self.compute_ranges(holomap, None, None)
            frame_ranges = OrderedDict([(key, self.compute_ranges(holomap, key, mapwise_ranges))
                                        for key in holomap.data.keys()])
        ranges = frame_ranges.values()
        return Compositor.collapse(holomap, (ranges, frame_ranges.keys()), mode='display')

    def _create_subplots(self, ranges):
        """
        Splits the overlay into layers and instantiates a subplot for
        each, batching them into a single plot where supported.
        """
        # Check if plot should be batched
        ordering = util.layer_sort(self.hmap)
        batched = self.batched and type(self.hmap.last) is NdOverlay
        if batched:
            backend = self.renderer.backend
            batchedplot = Store.registry[backend].get(self.hmap.last.type)
        # NOTE: batchedplot is only bound when batched is True; the
        # short-circuiting `and` below keeps this safe.
        if (batched and batchedplot and 'batched' in batchedplot._plot_methods and
            (not self.show_legend or len(ordering) > self.legend_limit)):
            self.batched = True
            keys, vmaps = [()], [self.hmap]
        else:
            self.batched = False
            keys, vmaps = self.hmap._split_overlays()

        if isinstance(self.hmap, DynamicMap):
            dmap_streams = [get_nested_streams(layer) for layer in
                            split_dmap_overlay(self.hmap)]
        else:
            dmap_streams = [None]*len(keys)

        # Compute global ordering
        length = self.style_grouping
        group_fn = lambda x: (x.type.__name__, x.last.group, x.last.label)
        for m in vmaps:
            self.map_lengths[group_fn(m)[:length]] += 1

        subplots = OrderedDict()
        for (key, vmap, streams) in zip(keys, vmaps, dmap_streams):
            subplot = self._create_subplot(key, vmap, streams, ranges)
            if subplot is None:
                continue
            if not isinstance(key, tuple): key = (key,)
            subplots[key] = subplot
            if isinstance(subplot, GenericOverlayPlot):
                # Nested overlays consume multiple zorders.
                self.zoffset += len(subplot.subplots.keys()) - 1

        if not subplots:
            raise SkipRendering("%s backend could not plot any Elements "
                                "in the Overlay." % self.renderer.backend)
        return subplots

    def _create_subplot(self, key, obj, streams, ranges):
        """
        Instantiates the subplot for a single overlay layer, computing
        its style key, zorder and cyclic (style) index. Returns None
        when no plotting class exists or the layer is empty.
        """
        registry = Store.registry[self.renderer.backend]
        ordering = util.layer_sort(self.hmap)
        overlay_type = 1 if self.hmap.type == Overlay else 2
        group_fn = lambda x: (x.type.__name__, x.last.group, x.last.label)

        opts = {'overlaid': overlay_type}
        if self.hmap.type == Overlay:
            style_key = (obj.type.__name__,) + key
            if self.overlay_dims:
                opts['overlay_dims'] = self.overlay_dims
        else:
            # NdOverlay layers carry their key dimensions as overlay_dims.
            if not isinstance(key, tuple): key = (key,)
            style_key = group_fn(obj) + key
            opts['overlay_dims'] = OrderedDict(zip(self.hmap.last.kdims, key))

        if self.batched:
            vtype = type(obj.last.last)
            oidx = 0
        else:
            vtype = type(obj.last)
            if style_key not in ordering:
                ordering.append(style_key)
            oidx = ordering.index(style_key)

        plottype = registry.get(vtype, None)
        if plottype is None:
            self.warning("No plotting class for %s type and %s backend "
                         "found. " % (vtype.__name__, self.renderer.backend))
            return None

        # Get zorder and style counter
        length = self.style_grouping
        group_key = style_key[:length]
        zorder = self.zorder + oidx + self.zoffset
        cyclic_index = self.group_counter[group_key]
        self.cyclic_index_lookup[style_key] = cyclic_index
        self.group_counter[group_key] += 1
        group_length = self.map_lengths[group_key]

        if not isinstance(plottype, PlotSelector) and issubclass(plottype, GenericOverlayPlot):
            opts['group_counter'] = self.group_counter
            opts['show_legend'] = self.show_legend
            if not any(len(frame) for frame in obj):
                self.warning('%s is empty and will be skipped during plotting'
                             % obj.last)
                return None
        elif self.batched and 'batched' in plottype._plot_methods:
            # Propagate inheritable options from the overlay to the
            # batched subplot.
            param_vals = dict(self.get_param_values())
            propagate = {opt: param_vals[opt] for opt in self._propagate_options
                         if opt in param_vals}
            opts['batched'] = self.batched
            opts['overlaid'] = self.overlaid
            opts.update(propagate)
        if len(ordering) > self.legend_limit:
            opts['show_legend'] = False
        style = self.lookup_options(obj.last, 'style').max_cycles(group_length)
        passed_handles = {k: v for k, v in self.handles.items()
                          if k in self._passed_handles}
        plotopts = dict(opts, cyclic_index=cyclic_index,
                        invert_axes=self.invert_axes,
                        dimensions=self.dimensions, keys=self.keys,
                        layout_dimensions=self.layout_dimensions,
                        ranges=ranges, show_title=self.show_title,
                        style=style, uniform=self.uniform,
                        fontsize=self.fontsize, streams=streams,
                        renderer=self.renderer, adjoined=self.adjoined,
                        stream_sources=self.stream_sources,
                        projection=self.projection,
                        zorder=zorder, **passed_handles)
        return plottype(obj, **plotopts)

    def _create_dynamic_subplots(self, key, items, ranges, **init_kwargs):
        """
        Handles the creation of new subplots when a DynamicMap returns
        a changing set of elements in an Overlay.
        """
        length = self.style_grouping
        group_fn = lambda x: (x.type.__name__, x.last.group, x.last.label)
        for i, (k, obj) in enumerate(items):
            vmap = self.hmap.clone([(key, obj)])
            self.map_lengths[group_fn(vmap)[:length]] += 1
            subplot = self._create_subplot(k, vmap, [], ranges)
            if subplot is None:
                continue
            self.subplots[k] = subplot
            subplot.initialize_plot(ranges, **init_kwargs)
            subplot.update_frame(key, ranges, element=obj)
            self.dynamic_subplots.append(subplot)

    def _update_subplot(self, subplot, spec):
        """
        Updates existing subplots when the subplot has been assigned
        to plot an element that is not an exact match to the object
        it was initially assigned.
        """
        # See if the precise spec has already been assigned a cyclic
        # index otherwise generate a new one
        if spec in self.cyclic_index_lookup:
            cyclic_index = self.cyclic_index_lookup[spec]
        else:
            group_key = spec[:self.style_grouping]
            self.group_counter[group_key] += 1
            cyclic_index = self.group_counter[group_key]
            self.cyclic_index_lookup[spec] = cyclic_index
        subplot.cyclic_index = cyclic_index
        if subplot.overlay_dims:
            # Rebind the overlay dimensions to the values in the new spec.
            odim_key = util.wrap_tuple(spec[-1])
            new_dims = zip(subplot.overlay_dims, odim_key)
            subplot.overlay_dims = util.OrderedDict(new_dims)

    def _get_subplot_extents(self, overlay, ranges, range_type):
        """
        Iterates over all subplots and collects the extents of each.
        """
        if range_type == 'combined':
            extents = {'extents': [], 'soft': [], 'hard': [], 'data': []}
        else:
            extents = {range_type: []}
        items = overlay.items()
        if self.batched and self.subplots:
            # A batched overlay has a single subplot shared by all layers.
            subplot = list(self.subplots.values())[0]
            subplots = [(k, subplot) for k in overlay.data.keys()]
        else:
            subplots = self.subplots.items()
        for key, subplot in subplots:
            found = False
            if subplot is None:
                continue
            layer = overlay.data.get(key, None)
            if isinstance(self.hmap, DynamicMap) and layer is None:
                # Dynamic overlays may reorder layers; match by type instead.
                for _, layer in items:
                    if isinstance(layer, subplot.hmap.type):
                        found = True
                        break
                if not found:
                    layer = None
            if layer is None or not subplot.apply_ranges:
                continue
            if isinstance(layer, CompositeOverlay):
                sp_ranges = ranges
            else:
                sp_ranges = util.match_spec(layer, ranges) if ranges else {}
            for rt in extents:
                extent = subplot.get_extents(layer, sp_ranges, range_type=rt)
                extents[rt].append(extent)
        return extents

    def get_extents(self, overlay, ranges, range_type='combined'):
        """
        Combines the extents of all subplots, applying minimum spans,
        padding, Element.extents and finally the xlim/ylim/zlim plot
        options.
        """
        subplot_extents = self._get_subplot_extents(overlay, ranges, range_type)
        zrange = self.projection == '3d'
        extents = {k: util.max_extents(rs, zrange) for k, rs in subplot_extents.items()}
        if range_type != 'combined':
            return extents[range_type]

        # Unpack extents
        if len(extents['data']) == 6:
            x0, y0, z0, x1, y1, z1 = extents['data']
            sx0, sy0, sz0, sx1, sy1, sz1 = extents['soft']
            hx0, hy0, hz0, hx1, hy1, hz1 = extents['hard']
        else:
            x0, y0, x1, y1 = extents['data']
            sx0, sy0, sx1, sy1 = extents['soft']
            hx0, hy0, hx1, hy1 = extents['hard']
            z0, z1 = np.NaN, np.NaN

        # Apply minimum span
        xspan, yspan, zspan = (v/2. for v in get_axis_padding(self.default_span))
        x0, x1 = get_minimum_span(x0, x1, xspan)
        y0, y1 = get_minimum_span(y0, y1, yspan)
        z0, z1 = get_minimum_span(z0, z1, zspan)

        # Apply padding
        xpad, ypad, zpad = self.get_padding((x0, y0, z0, x1, y1, z1))
        x0, x1 = util.dimension_range(x0, x1, (hx0, hx1), (sx0, sx1), xpad, self.logx)
        y0, y1 = util.dimension_range(y0, y1, (hy0, hy1), (sy0, sy1), ypad, self.logy)
        if len(extents['data']) == 6:
            z0, z1 = util.dimension_range(z0, z1, (hz0, hz1), (sz0, sz1), zpad, self.logz)
            padded = (x0, y0, z0, x1, y1, z1)
        else:
            padded = (x0, y0, x1, y1)

        # Combine with Element.extents
        combined = util.max_extents([padded, extents['extents']], zrange)
        if self.projection == '3d':
            x0, y0, z0, x1, y1, z1 = combined
        else:
            x0, y0, x1, y1 = combined

        # Apply xlim, ylim, zlim plot option
        x0, x1 = util.dimension_range(x0, x1, self.xlim, (None, None))
        y0, y1 = util.dimension_range(y0, y1, self.ylim, (None, None))
        if self.projection == '3d':
            z0, z1 = util.dimension_range(z0, z1, getattr(self, 'zlim', (None, None)), (None, None))
            return (x0, y0, z0, x1, y1, z1)
        return (x0, y0, x1, y1)
class GenericCompositePlot(DimensionedPlot):
    """
    Baseclass for plots which render a composite container (e.g. a
    Layout) of viewable objects, handling shared keys, dynamic mode
    detection and stream collection across the contained items.
    """

    def __init__(self, layout, keys=None, dimensions=None, **params):
        if 'uniform' not in params:
            params['uniform'] = traversal.uniform(layout)

        # Top-level plots derive dimensions/keys from the layout itself.
        self.top_level = keys is None
        if self.top_level:
            dimensions, keys = traversal.unique_dimkeys(layout)

        dynamic, unbounded = get_dynamic_mode(layout)
        if unbounded:
            # Sample unbounded DynamicMaps at the first key.
            initialize_unbounded(layout, dimensions, keys[0])
        self.layout = layout
        super(GenericCompositePlot, self).__init__(keys=keys,
                                                   dynamic=dynamic,
                                                   dimensions=dimensions,
                                                   **params)
        # Collect and deduplicate streams from all nested DynamicMaps.
        nested_streams = layout.traverse(lambda x: get_nested_streams(x),
                                         [DynamicMap])
        self.streams = list(set([s for streams in nested_streams for s in streams]))
        self._link_dimensioned_streams()

    def _link_dimensioned_streams(self):
        """
        Should perform any linking required to update titles when dimensioned
        streams change.
        """

    def _get_frame(self, key):
        """
        Creates a clone of the Layout with the nth-frame for each
        Element.
        """
        # cached=True only on the very first lookup.
        cached = self.current_key is None
        layout_frame = self.layout.clone(shared_data=False)
        if key == self.current_key and not self._force:
            return self.current_frame
        else:
            self.current_key = key

        key_map = dict(zip([d.name for d in self.dimensions], key))
        for path, item in self.layout.items():
            frame = item.map(lambda x: get_plot_frame(x, key_map, cached=cached),
                             ['DynamicMap', 'HoloMap'])
            if frame is not None:
                layout_frame[path] = frame
        # Clear the _force flag on this plot and all subplots.
        traverse_setter(self, '_force', False)

        self.current_frame = layout_frame
        return layout_frame

    def _format_title(self, key, dimensions=True, separator='\n'):
        """
        Formats the title for the composite plot from the layout's
        label, group, type and the current dimension values.
        """
        dim_title = self._frame_title(key, 3, separator) if dimensions else ''
        layout = self.layout
        type_name = type(self.layout).__name__
        # Suppress the group when it merely repeats the type name.
        group = util.bytes_to_unicode(layout.group if layout.group != type_name else '')
        label = util.bytes_to_unicode(layout.label)
        title = util.bytes_to_unicode(self.title_format).format(label=label,
                                                                group=group,
                                                                type=type_name,
                                                                dimensions=dim_title)
        return title.strip(' \n')
class GenericLayoutPlot(GenericCompositePlot):
    """
    A GenericLayoutPlot accepts either a Layout or a NdLayout and
    displays the elements in a cartesian grid in scanline order.
    """

    transpose = param.Boolean(default=False, doc="""
        Whether to transpose the layout when plotting. Switches
        from row-based left-to-right and top-to-bottom scanline order
        to column-based top-to-bottom and left-to-right order.""")

    def __init__(self, layout, **params):
        if not isinstance(layout, (NdLayout, Layout)):
            raise ValueError("GenericLayoutPlot only accepts Layout objects.")
        if len(layout.values()) == 0:
            # Nothing to render; skip silently.
            raise SkipRendering(warn=False)

        super(GenericLayoutPlot, self).__init__(layout, **params)
        self.subplots = {}
        self.rows, self.cols = layout.shape[::-1] if self.transpose else layout.shape
        # Grid coordinates (row, col) enumerated in scanline order.
        self.coords = list(product(range(self.rows),
                                   range(self.cols)))
| 1 | 22,167 | This stuff is a bit ugly and should probably become a utility. The main problem is that ``.map`` is being abused a bit. | holoviz-holoviews | py |
@@ -1128,6 +1128,9 @@ class ShareableResource(UserResource):
filters = super(ShareableResource, self)._extract_filters(queryparams)
ids = self.context.shared_ids
+ if getattr(self.context, "forced_empty_list", False):
+ ids = ['UNKNOWN']
+
if ids:
filter_by_id = Filter(self.model.id_field, ids, COMPARISON.IN)
filters.insert(0, filter_by_id) | 1 | import re
import functools
import warnings
import colander
import venusian
import six
from pyramid import exceptions as pyramid_exceptions
from pyramid.decorator import reify
from pyramid.httpexceptions import (HTTPNotModified, HTTPPreconditionFailed,
HTTPNotFound, HTTPConflict,
HTTPServiceUnavailable)
from kinto.core import logger
from kinto.core import Service
from kinto.core.errors import http_error, raise_invalid, send_alert, ERRORS
from kinto.core.events import ACTIONS
from kinto.core.storage import exceptions as storage_exceptions, Filter, Sort
from kinto.core.utils import (
COMPARISON, classname, native_value, decode64, encode64, json,
encode_header, decode_header, DeprecatedMeta, dict_subset
)
from .model import Model, ShareableModel
from .schema import ResourceSchema
from .viewset import ViewSet, ShareableViewSet
def register(depth=1, **kwargs):
    """Resource class decorator.

    Registers the decorated class in the cornice registry, forwarding
    every keyword argument to the register_resource function.
    """
    def decorator(resource_cls):
        register_resource(resource_cls, depth=depth + 1, **kwargs)
        return resource_cls
    return decorator
def register_resource(resource_cls, settings=None, viewset=None, depth=1,
                      **kwargs):
    """Register a resource in the cornice registry.

    :param resource_cls:
        The resource class to register.
        It should be a class or have a "name" attribute.

    :param viewset:
        A ViewSet object, which will be used to find out which arguments should
        be appended to the views, and where the views are.

    :param depth:
        A depth offset. It will be used to determine what is the level of depth
        in the call tree. (set to 1 by default.)

    Any additional keyword parameters will be used to override the viewset
    attributes.
    """
    if viewset is None:
        viewset = resource_cls.default_viewset(**kwargs)
    else:
        viewset.update(**kwargs)

    resource_name = viewset.get_name(resource_cls)

    path_formatters = {
        'resource_name': resource_name
    }

    def register_service(endpoint_type, settings):
        """Registers a service in cornice, for the given type."""
        path_pattern = getattr(viewset, '%s_path' % endpoint_type)
        path = path_pattern.format(**path_formatters)

        name = viewset.get_service_name(endpoint_type, resource_cls)

        service = Service(name, path, depth=depth,
                          **viewset.get_service_arguments())

        # Attach viewset and resource to the service for later reference.
        service.viewset = viewset
        service.resource = resource_cls
        service.collection_path = viewset.collection_path.format(
            **path_formatters)
        service.record_path = viewset.record_path.format(**path_formatters)
        service.type = endpoint_type

        # Register one view per enabled HTTP method for this endpoint type.
        methods = getattr(viewset, '%s_methods' % endpoint_type)
        for method in methods:
            if not viewset.is_endpoint_enabled(
                    endpoint_type, resource_name, method.lower(), settings):
                continue

            argument_getter = getattr(viewset, '%s_arguments' % endpoint_type)
            view_args = argument_getter(resource_cls, method)

            view = viewset.get_view(endpoint_type, method.lower())
            service.add_view(method, view, klass=resource_cls, **view_args)

        return service

    def callback(context, name, ob):
        # get the callbacks registred by the inner services
        # and call them from here when the @resource classes are being
        # scanned by venusian.
        config = context.config.with_package(info.module)

        # Storage is mandatory for resources.
        if not hasattr(config.registry, 'storage'):
            msg = 'Mandatory storage backend is missing from configuration.'
            raise pyramid_exceptions.ConfigurationError(msg)

        services = [register_service('collection', config.registry.settings),
                    register_service('record', config.registry.settings)]
        for service in services:
            config.add_cornice_service(service)

    # Registration is deferred until config.scan() triggers the callback.
    info = venusian.attach(resource_cls, callback, category='pyramid',
                           depth=depth)
    return callback
class UserResource(object):
    """Base resource class providing every endpoint."""

    default_viewset = ViewSet
    """Default :class:`kinto.core.viewset.ViewSet` class to use when the resource
    is registered."""

    default_model = Model
    """Default :class:`kinto.core.resource.model.Model` class to use for
    interacting the :mod:`kinto.core.storage` and :mod:`kinto.core.permission`
    backends."""

    mapping = ResourceSchema()
    """Schema to validate records."""
def __init__(self, request, context=None):
    """Instantiate the resource for the current request.

    :param request: The current pyramid request.
    :param context: Optional route factory context (used by the
        shareable subclass for permission checks).
    """
    # Models are isolated by user.
    parent_id = self.get_parent_id(request)

    # Authentication to storage is transmitted as is (cf. cloud_storage).
    auth = request.headers.get('Authorization')

    self.model = self.default_model(
        storage=request.registry.storage,
        id_generator=request.registry.id_generator,
        collection_id=classname(self),
        parent_id=parent_id,
        auth=auth)

    self.request = request
    self.context = context
    # Record id from the URL (None on collection endpoints).
    self.record_id = self.request.matchdict.get('id')
    self.force_patch_update = False

    # Log resource context.
    logger.bind(collection_id=self.model.collection_id,
                collection_timestamp=self.timestamp)
@reify
def timestamp(self):
    """Return the current collection timestamp.

    Computed once per request thanks to :func:`pyramid.decorator.reify`.

    :rtype: int
    """
    try:
        return self.model.timestamp()
    except storage_exceptions.BackendError as e:
        is_readonly = self.request.registry.settings['readonly']
        if not is_readonly:
            raise e
        # If the instance is configured to be readonly, and if the
        # collection is empty, the backend will try to bump the timestamp.
        # It fails if the configured db user has not write privileges.
        logger.exception(e)
        error_msg = ("Collection timestamp cannot be written. "
                     "Records endpoint must be hit at least once from a "
                     "writable instance.")
        raise http_error(HTTPServiceUnavailable(),
                         errno=ERRORS.BACKEND,
                         message=error_msg)
@property
def collection(self):
    """Deprecated alias for :attr:`model`."""
    warnings.warn('``self.collection`` is now deprecated. '
                  'Please use ``self.model`` instead',
                  DeprecationWarning)
    return self.model
def get_parent_id(self, request):
    """Return the parent_id of the resource with regards to the current
    request.

    Records are isolated per user by default, so the prefixed user id
    serves as the parent id.

    :param request:
        The request used to create the resource.

    :rtype: str
    """
    return request.prefixed_userid
def _get_known_fields(self):
    """Return the names of every field defined in the resource mapping,
    plus the model's id, last-modified and deleted fields."""
    schema_fields = [child.name for child in self.mapping.children]
    model_fields = [self.model.id_field,
                    self.model.modified_field,
                    self.model.deleted_field]
    return schema_fields + model_fields
def is_known_field(self, field):
    """Check whether `field` is part of the resource mapping.

    :param str field: Field name
    :rtype: bool
    """
    return field in self._get_known_fields()
#
# End-points
#
def collection_get(self):
    """Model ``GET`` endpoint: retrieve multiple records.

    :raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotModified` if
        ``If-None-Match`` header is provided and collection not
        modified in the interim.

    :raises:
        :exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if
        ``If-Match`` header is provided and collection modified
        in the interim.

    :raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPBadRequest`
        if filters or sorting are invalid.
    """
    self._add_timestamp_header(self.request.response)
    self._add_cache_header(self.request.response)
    self._raise_304_if_not_modified()
    self._raise_412_if_modified()

    headers = self.request.response.headers
    filters = self._extract_filters()
    limit = self._extract_limit()
    sorting = self._extract_sorting(limit)
    partial_fields = self._extract_partial_fields()

    # Tombstones are only included when filtering on last_modified.
    filter_fields = [f.field for f in filters]
    include_deleted = self.model.modified_field in filter_fields

    pagination_rules, offset = self._extract_pagination_rules_from_token(
        limit, sorting)

    records, total_records = self.model.get_records(
        filters=filters,
        sorting=sorting,
        limit=limit,
        pagination_rules=pagination_rules,
        include_deleted=include_deleted)

    offset = offset + len(records)
    next_page = None

    # Expose a Next-Page header while more records remain.
    if limit and len(records) == limit and offset < total_records:
        lastrecord = records[-1]
        next_page = self._next_page_url(sorting, limit, lastrecord, offset)
        headers['Next-Page'] = encode_header(next_page)

    # Reduce each record to the requested fields (``_fields`` param).
    if partial_fields:
        records = [
            dict_subset(record, partial_fields)
            for record in records
        ]

    # Bind metric about response size.
    logger.bind(nb_records=len(records), limit=limit)
    headers['Total-Records'] = encode_header('%s' % total_records)

    return self.postprocess(records)
def collection_post(self):
    """Model ``POST`` endpoint: create a record.

    If the new record conflicts against a unique field constraint, the
    posted record is ignored, and the existing record is returned, with
    a ``200`` status.

    :raises:
        :exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if
        ``If-Match`` header is provided and collection modified
        in the interim.

    .. seealso::

        Add custom behaviour by overriding
        :meth:`kinto.core.resource.UserResource.process_record`
    """
    existing = None
    new_record = self.request.validated['data']
    try:
        id_field = self.model.id_field
        # Since ``id`` does not belong to schema, look up in body.
        new_record[id_field] = _id = self.request.json['data'][id_field]
        self._raise_400_if_invalid_id(_id)
        existing = self._get_record_or_404(_id)
    except (HTTPNotFound, KeyError, ValueError):
        # No id provided in the body, or no record with that id yet:
        # proceed with a plain creation.
        pass

    self._raise_412_if_modified(record=existing)

    new_record = self.process_record(new_record)
    try:
        unique_fields = self.mapping.get_option('unique_fields')
        record = self.model.create_record(new_record,
                                          unique_fields=unique_fields)
        self.request.response.status_code = 201
        action = ACTIONS.CREATE
    except storage_exceptions.UnicityError as e:
        # Failed to write: keep the conflicting record and report a read.
        record = e.record
        action = ACTIONS.READ

    return self.postprocess(record, action=action)
def collection_delete(self):
    """Model ``DELETE`` endpoint: delete multiple records.

    :raises:
        :exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if
        ``If-Match`` header is provided and collection modified
        in the interim.

    :raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPBadRequest`
        if filters are invalid.
    """
    self._raise_412_if_modified()

    filters = self._extract_filters()
    deleted = self.model.delete_records(filters=filters)

    # Report a DELETE action only when something was actually removed.
    # (A conditional expression replaces the fragile ``x and A or B``
    # idiom, which silently breaks if A is falsy.)
    action = ACTIONS.DELETE if deleted else ACTIONS.READ
    return self.postprocess(deleted, action=action)
def get(self):
    """Record ``GET`` endpoint: retrieve a record.

    :raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotFound` if
        the record is not found.

    :raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotModified` if
        ``If-None-Match`` header is provided and record not
        modified in the interim.

    :raises:
        :exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if
        ``If-Match`` header is provided and record modified
        in the interim.
    """
    self._raise_400_if_invalid_id(self.record_id)
    record = self._get_record_or_404(self.record_id)

    # Use the record's own timestamp (not the collection's) for the ETag.
    timestamp = record[self.model.modified_field]
    self._add_timestamp_header(self.request.response, timestamp=timestamp)
    self._add_cache_header(self.request.response)
    self._raise_304_if_not_modified(record)
    self._raise_412_if_modified(record)

    # Apply the ``_fields`` projection, if requested.
    partial_fields = self._extract_partial_fields()
    if partial_fields:
        record = dict_subset(record, partial_fields)

    return self.postprocess(record)
def put(self):
    """Record ``PUT`` endpoint: create or replace the provided record and
    return it.

    :raises:
        :exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if
        ``If-Match`` header is provided and record modified
        in the interim.

    .. note::

        If ``If-None-Match: *`` request header is provided, the
        ``PUT`` will succeed only if no record exists with this id.

    .. seealso::

        Add custom behaviour by overriding
        :meth:`kinto.core.resource.UserResource.process_record`.
    """
    self._raise_400_if_invalid_id(self.record_id)
    id_field = self.model.id_field
    existing = None
    tombstones = None
    try:
        existing = self._get_record_or_404(self.record_id)
    except HTTPNotFound:
        # Look if this record used to exist (for preconditions check).
        filter_by_id = Filter(id_field, self.record_id, COMPARISON.EQ)
        tombstones, _ = self.model.get_records(filters=[filter_by_id],
                                               include_deleted=True)
        if len(tombstones) > 0:
            existing = tombstones[0]
    finally:
        if existing:
            self._raise_412_if_modified(existing)

    post_record = self.request.validated['data']

    # The id from the URL path is authoritative over any id in the body.
    record_id = post_record.setdefault(id_field, self.record_id)
    self._raise_400_if_id_mismatch(record_id, self.record_id)

    new_record = self.process_record(post_record, old=existing)

    try:
        unique = self.mapping.get_option('unique_fields')
        # A tombstone cannot be updated in place: re-create the record.
        if existing and not tombstones:
            record = self.model.update_record(new_record,
                                              unique_fields=unique)
        else:
            record = self.model.create_record(new_record,
                                              unique_fields=unique)
            self.request.response.status_code = 201
    except storage_exceptions.UnicityError as e:
        self._raise_conflict(e)

    timestamp = record[self.model.modified_field]
    self._add_timestamp_header(self.request.response, timestamp=timestamp)

    # ``existing`` is also set for tombstones, so a re-creation is
    # reported as an update.
    action = existing and ACTIONS.UPDATE or ACTIONS.CREATE
    return self.postprocess(record, action=action, old=existing)
def patch(self):
    """Record ``PATCH`` endpoint: modify a record and return its
    new version.

    If a request header ``Response-Behavior`` is set to ``light``,
    only the fields whose value was changed are returned.
    If set to ``diff``, only the fields whose value became different than
    the one provided are returned.

    :raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotFound` if
        the record is not found.

    :raises:
        :exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if
        ``If-Match`` header is provided and record modified
        in the interim.

    .. seealso::
        Add custom behaviour by overriding
        :meth:`kinto.core.resource.UserResource.apply_changes` or
        :meth:`kinto.core.resource.UserResource.process_record`.
    """
    self._raise_400_if_invalid_id(self.record_id)
    existing = self._get_record_or_404(self.record_id)
    self._raise_412_if_modified(existing)

    try:
        # `data` attribute may not be present if only perms are patched.
        changes = self.request.json.get('data', {})
    except ValueError:
        # If no `data` nor `permissions` is provided in patch, reject!
        # XXX: This should happen in schema instead (c.f. ShareableViewSet)
        error_details = {
            'name': 'data',
            'description': 'Provide at least one of data or permissions',
        }
        raise_invalid(self.request, **error_details)

    updated = self.apply_changes(existing, changes=changes)

    record_id = updated.setdefault(self.model.id_field,
                                   self.record_id)
    self._raise_400_if_id_mismatch(record_id, self.record_id)

    new_record = self.process_record(updated, old=existing)

    # Fields touched by the patch whose value actually changed.
    changed_fields = [k for k in changes.keys()
                      if existing.get(k) != new_record.get(k)]

    # Save in storage if necessary.
    if changed_fields or self.force_patch_update:
        try:
            unique_fields = self.mapping.get_option('unique_fields')
            new_record = self.model.update_record(
                new_record,
                unique_fields=unique_fields)
        except storage_exceptions.UnicityError as e:
            self._raise_conflict(e)
    else:
        # Behave as if storage would have added `id` and `last_modified`.
        for extra_field in [self.model.modified_field,
                            self.model.id_field]:
            new_record[extra_field] = existing[extra_field]

    # Adjust response according to ``Response-Behavior`` header
    body_behavior = self.request.headers.get('Response-Behavior', 'full')

    if body_behavior.lower() == 'light':
        # Only fields that were changed.
        data = {k: new_record[k] for k in changed_fields}
    elif body_behavior.lower() == 'diff':
        # Only fields that are different from those provided.
        data = {k: new_record[k] for k in changed_fields
                if changes.get(k) != new_record.get(k)}
    else:
        data = new_record

    timestamp = new_record.get(self.model.modified_field,
                               existing[self.model.modified_field])
    self._add_timestamp_header(self.request.response, timestamp=timestamp)

    return self.postprocess(data, action=ACTIONS.UPDATE, old=existing)
def delete(self):
    """Record ``DELETE`` endpoint: delete a record and return it.

    :raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotFound` if
        the record is not found.

    :raises:
        :exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if
        ``If-Match`` header is provided and record modified
        in the interim.
    """
    self._raise_400_if_invalid_id(self.record_id)
    record = self._get_record_or_404(self.record_id)
    self._raise_412_if_modified(record)

    # Retrieve the last_modified information from a querystring if present.
    last_modified = self.request.GET.get('last_modified')
    if last_modified:
        last_modified = native_value(last_modified.strip('"'))
        if not isinstance(last_modified, six.integer_types):
            error_details = {
                'name': 'last_modified',
                'location': 'querystring',
                'description': 'Invalid value for %s' % last_modified
            }
            raise_invalid(self.request, **error_details)

        # If less or equal than current record. Ignore it.
        if last_modified <= record[self.model.modified_field]:
            last_modified = None

    deleted = self.model.delete_record(record, last_modified=last_modified)
    return self.postprocess(deleted, action=ACTIONS.DELETE)
#
# Data processing
#
def process_record(self, new, old=None):
    """Hook for processing records before they reach storage, to introduce
    specific logics on fields for example.

    .. code-block:: python

        def process_record(self, new, old=None):
            new = super(MyResource, self).process_record(new, old)
            version = old['version'] if old else 0
            new['version'] = version + 1
            return new

    Or add extra validation based on request:

    .. code-block:: python

        from kinto.core.errors import raise_invalid

        def process_record(self, new, old=None):
            new = super(MyResource, self).process_record(new, old)
            if new['browser'] not in request.headers['User-Agent']:
                raise_invalid(self.request, name='browser', error='Wrong')
            return new

    :param dict new: the validated record to be created or updated.
    :param dict old: the old record to be updated,
        ``None`` for creation endpoints.
    :returns: the processed record.
    :rtype: dict
    """
    modified_field = self.model.modified_field
    incoming_timestamp = new.get(modified_field)

    # Nothing to reconcile if the client sent no timestamp, or there is
    # no previous version to compare against.
    if incoming_timestamp is None:
        return new
    if old is None or modified_field not in old:
        return new

    # Drop the new last_modified if lesser or equal to the old one.
    if incoming_timestamp and incoming_timestamp <= old[modified_field]:
        del new[modified_field]

    return new
def apply_changes(self, record, changes):
    """Merge `changes` into `record` fields.

    .. note::

        This is used in the context of PATCH only.

    Override this to control field changes at record level, for example:

    .. code-block:: python

        def apply_changes(self, record, changes):
            # Ignore value change if inferior
            if record['position'] > changes.get('position', -1):
                changes.pop('position', None)
            return super(MyResource, self).apply_changes(record, changes)

    :raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPBadRequest`
        if result does not comply with resource schema.

    :returns: the new record with `changes` applied.
    :rtype: dict
    """
    # Reject any attempt to change a read-only field to a new value.
    for field, value in changes.items():
        has_changed = record.get(field, value) != value
        if self.mapping.is_readonly(field) and has_changed:
            error_details = {
                'name': field,
                'description': 'Cannot modify {0}'.format(field)
            }
            raise_invalid(self.request, **error_details)

    # Merge on a copy: the caller's record is left untouched.
    updated = record.copy()
    updated.update(**changes)

    try:
        return self.mapping.deserialize(updated)
    except colander.Invalid as e:
        # Transform the errors we got from colander into Cornice errors.
        # We could not rely on Service schema because the record should be
        # validated only once the changes are applied
        for field, error in e.asdict().items():
            raise_invalid(self.request, name=field, description=error)
def postprocess(self, result, action=ACTIONS.READ, old=None):
    """Wrap ``result`` in the response envelope and notify the resource
    event machinery.

    :param result: the record dict (or list of records) to return.
    :param action: the action performed (read/create/update/delete).
    :param old: the previous version of the record, if any.
    :returns: the response body mapping, with a ``data`` attribute.
    :rtype: dict
    """
    self.request.notify_resource_event(timestamp=self.timestamp,
                                       data=result,
                                       action=action,
                                       old=old)
    return {'data': result}
#
# Internals
#
def _get_record_or_404(self, record_id):
"""Retrieve record from storage and raise ``404 Not found`` if missing.
:raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotFound` if
the record is not found.
"""
if self.context and self.context.current_record:
# Set during authorization. Save a storage hit.
return self.context.current_record
try:
return self.model.get_record(record_id)
except storage_exceptions.RecordNotFoundError:
response = http_error(HTTPNotFound(),
errno=ERRORS.INVALID_RESOURCE_ID)
raise response
def _add_timestamp_header(self, response, timestamp=None):
    """Expose the current (or given) timestamp in the response headers,
    both as ``Last-Modified`` and as a quoted ``ETag``.
    """
    ts = self.timestamp if timestamp is None else timestamp
    # Pyramid takes care of converting the epoch seconds to an HTTP date.
    response.last_modified = ts / 1000.0
    # The millisecond timestamp is returned as a quoted ETag.
    response.headers['ETag'] = encode_header('"%s"' % ts)
def _add_cache_header(self, response):
    """Add Cache-Control and Expire headers, based on a setting for the
    current resource.

    Cache headers will be set with anonymous requests only.

    .. note::

        The ``Cache-Control: no-cache`` response header does not prevent
        caching in client. It will indicate the client to revalidate
        the response content on each access. The client will send a
        conditional request to the server and check that a
        ``304 Not modified`` is returned before serving content from cache.
    """
    # Expiration is configured per resource, e.g. via a
    # ``record_cache_expires_seconds`` setting.
    resource_name = self.context.resource_name if self.context else ''
    setting_key = '%s_cache_expires_seconds' % resource_name
    collection_expires = self.request.registry.settings.get(setting_key)
    is_anonymous = self.request.prefixed_userid is None
    if collection_expires and is_anonymous:
        response.cache_expires(seconds=int(collection_expires))
    else:
        # Since `Expires` response header provides an HTTP data with a
        # resolution in seconds, do not use Pyramid `cache_expires()` in
        # order to omit it.
        response.cache_control.no_cache = True
def _raise_400_if_invalid_id(self, record_id):
    """Raise 400 if specified record id does not match the format excepted
    by storage backends.

    :raises: :class:`pyramid.httpexceptions.HTTPBadRequest`
    """
    if self.model.id_generator.match(six.text_type(record_id)):
        return
    raise_invalid(self.request,
                  location='path',
                  description="Invalid record id")
def _raise_304_if_not_modified(self, record=None):
    """Raise 304 if current timestamp is inferior to the one specified
    in headers.

    :param record: record to compare against; when ``None``, the
        collection timestamp is used instead.
    :raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotModified`
    """
    if_none_match = self.request.headers.get('If-None-Match')

    if not if_none_match:
        return

    if_none_match = decode_header(if_none_match)

    try:
        # The ETag is expected as a quoted integer, e.g. ``"1432208041618"``.
        if not (if_none_match[0] == if_none_match[-1] == '"'):
            raise ValueError()
        modified_since = int(if_none_match[1:-1])
    except (IndexError, ValueError):
        # ``If-None-Match: *`` is only meaningful for 412 checks; ignore.
        if if_none_match == '*':
            return
        error_details = {
            'location': 'headers',
            'description': "Invalid value for If-None-Match"
        }
        raise_invalid(self.request, **error_details)

    if record:
        current_timestamp = record[self.model.modified_field]
    else:
        current_timestamp = self.model.timestamp()

    if current_timestamp <= modified_since:
        response = HTTPNotModified()
        self._add_timestamp_header(response, timestamp=current_timestamp)
        raise response
def _raise_412_if_modified(self, record=None):
    """Raise 412 if current timestamp is superior to the one
    specified in headers.

    :param record: record to compare against; when ``None``, the
        collection timestamp is used instead.
    :raises:
        :exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed`
    """
    if_match = self.request.headers.get('If-Match')
    if_none_match = self.request.headers.get('If-None-Match')

    if not if_match and not if_none_match:
        return

    if_match = decode_header(if_match) if if_match else None

    # ``If-None-Match: *`` means "only if no such record exists".
    if record and if_none_match and decode_header(if_none_match) == '*':
        if record.get(self.model.deleted_field, False):
            # Tombstones should not prevent creation.
            return
        modified_since = -1  # Always raise.
    elif if_match:
        try:
            # Expected as a quoted integer, e.g. ``"1432208041618"``.
            if not (if_match[0] == if_match[-1] == '"'):
                raise ValueError()
            modified_since = int(if_match[1:-1])
        except (IndexError, ValueError):
            message = ("Invalid value for If-Match. The value should "
                       "be integer between double quotes.")
            error_details = {
                'location': 'headers',
                'description': message
            }
            raise_invalid(self.request, **error_details)
    else:
        # In case _raise_304_if_not_modified() did not raise.
        return

    if record:
        current_timestamp = record[self.model.modified_field]
    else:
        current_timestamp = self.model.timestamp()

    if current_timestamp > modified_since:
        error_msg = 'Resource was modified meanwhile'
        details = {'existing': record} if record else {}
        response = http_error(HTTPPreconditionFailed(),
                              errno=ERRORS.MODIFIED_MEANWHILE,
                              message=error_msg,
                              details=details)
        self._add_timestamp_header(response, timestamp=current_timestamp)
        raise response
def _raise_conflict(self, exception):
    """Helper to raise conflict responses.

    :param exception: the original unicity error
    :type exception: :class:`kinto.core.storage.exceptions.UnicityError`
    :raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPConflict`
    """
    field = exception.field
    record_id = exception.record[self.model.id_field]
    raise http_error(
        HTTPConflict(),
        errno=ERRORS.CONSTRAINT_VIOLATED,
        message='Conflict of field %s on record %s' % (field, record_id),
        details={
            "field": field,
            "existing": exception.record,
        })
def _raise_400_if_id_mismatch(self, new_id, record_id):
"""Raise 400 if the `new_id`, within the request body, does not match
the `record_id`, obtained from request path.
:raises: :class:`pyramid.httpexceptions.HTTPBadRequest`
"""
if new_id != record_id:
error_msg = 'Record id does not match existing record'
error_details = {
'name': self.model.id_field,
'description': error_msg
}
raise_invalid(self.request, **error_details)
def _extract_partial_fields(self):
    """Extract the fields to do the projection from QueryString parameters.

    :raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPBadRequest`
        if an unknown field is requested and the schema does not
        preserve unknown fields.
    :returns: the list of requested fields, or ``None`` if the
        ``_fields`` parameter is absent.
    """
    fields = self.request.GET.get('_fields', None)
    if fields:
        fields = fields.split(',')
        # Only the root of dotted paths (``a.b``) is validated.
        root_fields = [f.split('.')[0] for f in fields]
        known_fields = self._get_known_fields()
        invalid_fields = set(root_fields) - set(known_fields)
        preserve_unknown = self.mapping.get_option('preserve_unknown')
        if not preserve_unknown and invalid_fields:
            error_msg = "Fields %s do not exist" % ','.join(invalid_fields)
            error_details = {
                'name': "Invalid _fields parameter",
                'description': error_msg
            }
            raise_invalid(self.request, **error_details)

        # Since id and last_modified are part of the synchronisation
        # protocol, force their presence in payloads.
        fields = fields + [self.model.id_field, self.model.modified_field]

    return fields
def _extract_limit(self):
"""Extract limit value from QueryString parameters."""
paginate_by = self.request.registry.settings['paginate_by']
limit = self.request.GET.get('_limit', paginate_by)
if limit:
try:
limit = int(limit)
except ValueError:
error_details = {
'location': 'querystring',
'description': "_limit should be an integer"
}
raise_invalid(self.request, **error_details)
# If limit is higher than paginate_by setting, ignore it.
if limit and paginate_by:
limit = min(limit, paginate_by)
return limit
def _extract_filters(self, queryparams=None):
    """Extracts filters from QueryString parameters.

    :param queryparams: mapping of querystring parameters; defaults to
        the current request's ``GET``.
    :raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPBadRequest`
        if a filter field or value is invalid.
    :returns: a list of ``Filter`` instances.
    """
    if not queryparams:
        queryparams = self.request.GET

    filters = []

    for param, paramvalue in queryparams.items():
        param = param.strip()

        error_details = {
            'name': param,
            'location': 'querystring',
            'description': 'Invalid value for %s' % param
        }

        # Ignore specific fields
        if param.startswith('_') and param not in ('_since',
                                                   '_to',
                                                   '_before'):
            continue

        # Handle the _since specific filter.
        if param in ('_since', '_to', '_before'):
            value = native_value(paramvalue.strip('"'))
            if not isinstance(value, six.integer_types):
                raise_invalid(self.request, **error_details)

            if param == '_since':
                operator = COMPARISON.GT
            else:
                if param == '_to':
                    message = ('_to is now deprecated, '
                               'you should use _before instead')
                    url = ('http://kinto.rtfd.org/en/2.4.0/api/resource'
                           '.html#list-of-available-url-parameters')
                    send_alert(self.request, message, url)
                operator = COMPARISON.LT
            filters.append(
                Filter(self.model.modified_field, value, operator)
            )
            continue

        # A ``min_``/``not_``/``in_`` etc. prefix selects the operator;
        # a bare parameter name means equality.
        m = re.match(r'^(min|max|not|lt|gt|in|exclude)_(\w+)$', param)
        if m:
            keyword, field = m.groups()
            operator = getattr(COMPARISON, keyword.upper())
        else:
            operator, field = COMPARISON.EQ, param

        if not self.is_known_field(field):
            error_msg = "Unknown filter field '{0}'".format(param)
            error_details['description'] = error_msg
            raise_invalid(self.request, **error_details)

        value = native_value(paramvalue)
        if operator in (COMPARISON.IN, COMPARISON.EXCLUDE):
            value = set([native_value(v) for v in paramvalue.split(',')])

            # id is a string field and the modified field an integer one:
            # reject membership filters with the wrong element types.
            all_integers = all([isinstance(v, six.integer_types)
                                for v in value])
            all_strings = all([isinstance(v, six.text_type)
                               for v in value])
            has_invalid_value = (
                (field == self.model.id_field and not all_strings) or
                (field == self.model.modified_field and not all_integers)
            )
            if has_invalid_value:
                raise_invalid(self.request, **error_details)

        filters.append(Filter(field, value, operator))

    return filters
def _extract_sorting(self, limit):
    """Extract sorting definition from the ``_sort`` QueryString parameter.

    :raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPBadRequest`
        if an unknown field is used for sorting.
    :returns: a list of ``Sort`` instances.
    """
    specified = self.request.GET.get('_sort', '').split(',')
    sorting = []
    modified_field_used = self.model.modified_field in specified
    for field in specified:
        field = field.strip()
        # Optional leading '-' (descending) or '+' (ascending) prefix.
        m = re.match(r'^([\-+]?)(\w+)$', field)
        if m:
            order, field = m.groups()

            if not self.is_known_field(field):
                error_details = {
                    'location': 'querystring',
                    'description': "Unknown sort field '{0}'".format(field)
                }
                raise_invalid(self.request, **error_details)

            direction = -1 if order == '-' else 1
            sorting.append(Sort(field, direction))

    if not modified_field_used:
        # Add a sort by the ``modified_field`` in descending order
        # useful for pagination
        sorting.append(Sort(self.model.modified_field, -1))
    return sorting
def _build_pagination_rules(self, sorting, last_record, rules=None):
    """Return the list of rules for a given sorting attribute and
    last_record.

    Each rule is a list of filters: equality on every sort field except
    the innermost one, plus a strict LT/GT comparison (depending on sort
    direction) on that innermost field. Rules are accumulated
    recursively, dropping the last sort field at each step.
    """
    if rules is None:
        rules = []

    rule = []
    next_sorting = sorting[:-1]

    # Pin the value of every sort field except the innermost one.
    for field, _ in next_sorting:
        rule.append(Filter(field, last_record.get(field), COMPARISON.EQ))

    field, direction = sorting[-1]

    if direction == -1:
        rule.append(Filter(field, last_record.get(field), COMPARISON.LT))
    else:
        rule.append(Filter(field, last_record.get(field), COMPARISON.GT))
    rules.append(rule)

    if len(next_sorting) == 0:
        return rules

    return self._build_pagination_rules(next_sorting, last_record, rules)
def _extract_pagination_rules_from_token(self, limit, sorting):
    """Get pagination params.

    Decodes the ``_token`` querystring parameter (base64-encoded JSON
    holding the last record of the previous page plus a running offset)
    and turns it into storage pagination rules.

    :raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPBadRequest`
        if the token cannot be decoded.
    :returns: a ``(filters, offset)`` tuple.
    """
    queryparams = self.request.GET
    token = queryparams.get('_token', None)
    filters = []
    offset = 0
    if token:
        try:
            tokeninfo = json.loads(decode64(token))
            if not isinstance(tokeninfo, dict):
                raise ValueError()
            last_record = tokeninfo['last_record']
            offset = tokeninfo['offset']
        except (ValueError, KeyError, TypeError):
            error_msg = '_token has invalid content'
            error_details = {
                'location': 'querystring',
                'description': error_msg
            }
            raise_invalid(self.request, **error_details)

        filters = self._build_pagination_rules(sorting, last_record)
    return filters, offset
def _next_page_url(self, sorting, limit, last_record, offset):
"""Build the Next-Page header from where we stopped."""
token = self._build_pagination_token(sorting, last_record, offset)
params = self.request.GET.copy()
params['_limit'] = limit
params['_token'] = token
service = self.request.current_service
next_page_url = self.request.route_url(service.name, _query=params,
**self.request.matchdict)
return next_page_url
def _build_pagination_token(self, sorting, last_record, offset):
    """Build a pagination token.

    It is a base64 JSON object with the sorting fields values of
    the last_record.
    """
    token = {
        'last_record': {field: last_record[field] for field, _ in sorting},
        'offset': offset,
    }
    return encode64(json.dumps(token))
@six.add_metaclass(DeprecatedMeta)
class BaseResource(UserResource):
    # Deprecated alias of UserResource; DeprecatedMeta emits the warning
    # below when the class is used.
    __deprecation_warning__ = ('BaseResource is deprecated. '
                               'Use UserResource instead.')
class ShareableResource(UserResource):
    """Shareable resources allow to set permissions on records, in order to
    share their access or protect their modification.
    """
    default_model = ShareableModel
    default_viewset = ShareableViewSet
    permissions = ('read', 'write')
    """List of allowed permissions names."""

    def __init__(self, *args, **kwargs):
        super(ShareableResource, self).__init__(*args, **kwargs)
        # In base resource, PATCH only hit storage if no data has changed.
        # Here, we force update because we add the current principal to
        # the ``write`` ACE.
        self.force_patch_update = True

        # Required by the ShareableModel class.
        self.model.permission = self.request.registry.permission
        self.model.current_principal = self.request.prefixed_userid
        if self.context:
            self.model.get_permission_object_id = functools.partial(
                self.context.get_permission_object_id,
                self.request)

    def get_parent_id(self, request):
        """Unlike :class:`BaseResource`, records are not isolated by user.

        See https://github.com/mozilla-services/cliquet/issues/549

        :returns: A constant empty value.
        """
        return ''

    def _extract_filters(self, queryparams=None):
        """Override default filters extraction from QueryString to allow
        partial collection of records.

        XXX: find more elegant approach to add custom filters.
        """
        filters = super(ShareableResource, self)._extract_filters(queryparams)

        # Restrict results to the records shared with the user, if any.
        ids = self.context.shared_ids
        if ids:
            filter_by_id = Filter(self.model.id_field, ids, COMPARISON.IN)
            filters.insert(0, filter_by_id)

        return filters

    def _raise_412_if_modified(self, record=None):
        """Do not provide the permissions among the record fields.

        Ref: https://github.com/Kinto/kinto/issues/224
        """
        if record:
            # Work on a copy so the caller's record keeps its permissions.
            record = record.copy()
            record.pop(self.model.permissions_field, None)
        return super(ShareableResource, self)._raise_412_if_modified(record)

    def process_record(self, new, old=None):
        """Read permissions from request body, and in the case of ``PUT`` every
        existing ACE is removed (using empty list).
        """
        new = super(ShareableResource, self).process_record(new, old)
        permissions = self.request.validated.get('permissions', {})

        annotated = new.copy()

        if permissions:
            is_put = (self.request.method.lower() == 'put')
            if is_put:
                # Remove every existing ACEs using empty lists.
                for perm in self.permissions:
                    permissions.setdefault(perm, [])
            annotated[self.model.permissions_field] = permissions

        return annotated

    def postprocess(self, result, action=ACTIONS.READ, old=None):
        """Add ``permissions`` attribute in response body.

        In the protocol, it was decided that ``permissions`` would reside
        outside the ``data`` attribute.
        """
        body = {}

        if not isinstance(result, list):
            # record endpoint.
            perms = result.pop(self.model.permissions_field, None)
            if perms is not None:
                body['permissions'] = {k: list(p) for k, p in perms.items()}

            if old:
                # Remove permissions from event payload.
                old.pop(self.model.permissions_field, None)

        data = super(ShareableResource, self).postprocess(result, action, old)
        body.update(data)
        return body
@six.add_metaclass(DeprecatedMeta)
class ProtectedResource(ShareableResource):
    # Deprecated alias of ShareableResource; DeprecatedMeta emits the
    # warning below when the class is used.
    __deprecation_warning__ = ('ProtectedResource is deprecated. '
                               'Use ShareableResource instead.')
| 1 | 9,165 | Maybe there is a better way here so that we don't even bother triggering a storage query since we want an empty list here. | Kinto-kinto | py |
@@ -0,0 +1,16 @@
+package aws
+
+import (
+ "context"
+
+ "github.com/aws/aws-sdk-go-v2/service/iam"
+)
+
+func (c *client) SimulateCustomPolicy(ctx context.Context, region string, customPolicySimulatorParams *iam.SimulateCustomPolicyInput) (*iam.SimulateCustomPolicyOutput, error) {
+ rc, err := c.getRegionalClient(region)
+ if err != nil {
+ return nil, err
+ }
+
+ return rc.iam.SimulateCustomPolicy(ctx, customPolicySimulatorParams)
+} | 1 | 1 | 12,116 | just curious, `region` is not used in the AWS IAM client, afaik. But I see this is a general field for clutch clients, is it just ignored internally? | lyft-clutch | go |
|
@@ -7,7 +7,7 @@
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an | 1 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
* Contains some contributions under the Thrift Software License.
* Please see doc/old-thrift-license.txt in the Thrift distribution for
* details.
*/
using System;
using System.Text;
using Thrift.Transport;
using System.Collections;
using System.IO;
using System.Collections.Generic;
namespace Thrift.Protocol
{
public class TCompactProtocol : TProtocol
{
// Shared sentinel values, reused to avoid per-call allocations.
private static TStruct ANONYMOUS_STRUCT = new TStruct("");
private static TField TSTOP = new TField("", TType.Stop, (short)0);

// Lookup table translating TType ordinals into compact wire type codes.
// NOTE(review): static, but filled in the instance constructor below;
// the assignments are idempotent, yet a static initializer would be clearer.
private static byte[] ttypeToCompactType = new byte[16];

private const byte PROTOCOL_ID = 0x82;   // first byte of every compact message
private const byte VERSION = 1;
private const byte VERSION_MASK = 0x1f; // 0001 1111
private const byte TYPE_MASK = 0xE0; // 1110 0000
private const byte TYPE_BITS = 0x07; // 0000 0111
private const int TYPE_SHIFT_AMOUNT = 5;
/**
 * All of the on-wire type codes used by the compact protocol.
 * These are distinct from the TType enumeration values; the constructor
 * builds the translation table between the two.
 */
private static class Types
{
    public const byte STOP = 0x00;
    public const byte BOOLEAN_TRUE = 0x01;
    public const byte BOOLEAN_FALSE = 0x02;
    public const byte BYTE = 0x03;
    public const byte I16 = 0x04;
    public const byte I32 = 0x05;
    public const byte I64 = 0x06;
    public const byte DOUBLE = 0x07;
    public const byte BINARY = 0x08;
    public const byte LIST = 0x09;
    public const byte SET = 0x0A;
    public const byte MAP = 0x0B;
    public const byte STRUCT = 0x0C;
}
/**
 * Used to keep track of the last field for the current and previous structs,
 * so we can do the delta stuff.
 */
private Stack<short> lastField_ = new Stack<short>(15);

private short lastFieldId_ = 0;

/**
 * If we encounter a boolean field begin, save the TField here so it can
 * have the value incorporated.
 */
private Nullable<TField> booleanField_;

/**
 * If we Read a field header, and it's a boolean field, save the boolean
 * value here so that ReadBool can use it.
 */
private Nullable<Boolean> boolValue_;
#region CompactProtocol Factory

/**
 * Factory for TCompactProtocol instances, pluggable into any code that
 * accepts a generic TProtocolFactory.
 */
public class Factory : TProtocolFactory
{
    public Factory() { }

    // Creates a compact protocol bound to the given transport.
    public TProtocol GetProtocol(TTransport trans)
    {
        return new TCompactProtocol(trans);
    }
}

#endregion
/**
 * Builds a compact protocol bound to the given transport, and fills the
 * TType -> compact wire-type translation table.
 * NOTE(review): the table is static but (re)initialized on every
 * construction; the assignments are idempotent, so this is harmless.
 */
public TCompactProtocol(TTransport trans)
    : base(trans)
{
    ttypeToCompactType[(int)TType.Stop] = Types.STOP;
    ttypeToCompactType[(int)TType.Bool] = Types.BOOLEAN_TRUE;
    ttypeToCompactType[(int)TType.Byte] = Types.BYTE;
    ttypeToCompactType[(int)TType.I16] = Types.I16;
    ttypeToCompactType[(int)TType.I32] = Types.I32;
    ttypeToCompactType[(int)TType.I64] = Types.I64;
    ttypeToCompactType[(int)TType.Double] = Types.DOUBLE;
    ttypeToCompactType[(int)TType.String] = Types.BINARY;
    ttypeToCompactType[(int)TType.List] = Types.LIST;
    ttypeToCompactType[(int)TType.Set] = Types.SET;
    ttypeToCompactType[(int)TType.Map] = Types.MAP;
    ttypeToCompactType[(int)TType.Struct] = Types.STRUCT;
}
/**
 * Clears the field-id delta-tracking state so this protocol instance
 * can be reused for a fresh message.
 */
public void reset()
{
    lastField_.Clear();
    lastFieldId_ = 0;
}
#region Write Methods

/**
 * Writes a byte without any possibility of all that field header nonsense.
 * Used internally by other writing methods that know they need to Write a byte.
 * Reuses a single one-byte buffer to avoid a per-call allocation.
 */
private byte[] byteDirectBuffer = new byte[1];

private void WriteByteDirect(byte b)
{
    byteDirectBuffer[0] = b;
    trans.Write(byteDirectBuffer);
}
/**
 * Writes a byte without any possibility of all that field header nonsense.
 * Convenience overload that narrows an int to a byte first.
 */
private void WriteByteDirect(int n)
{
    WriteByteDirect((byte)n);
}
/**
 * Write an i32 as an unsigned varint. Results in 1-5 bytes on the wire:
 * seven payload bits per byte, least-significant group first, with the
 * continuation bit (0x80) set on all but the last byte.
 *
 * The varint is staged in the reusable i32buf scratch buffer so the whole
 * value goes to the transport in a single Write call.
 */
byte[] i32buf = new byte[5];

private void WriteVarint32(uint n)
{
    int idx = 0;
    // Emit continuation bytes while bits above the low 7 remain.
    while ((n & ~0x7FU) != 0)
    {
        i32buf[idx++] = (byte)((n & 0x7F) | 0x80);
        n >>= 7;
    }
    // Final byte: high bit clear signals the end of the varint.
    i32buf[idx++] = (byte)n;
    trans.Write(i32buf, 0, idx);
}
/**
 * Write a message header to the wire. Compact Protocol messages contain the
 * protocol version so we can migrate forwards in the future if need be.
 * Layout: protocol id byte; one byte with the version in the low 5 bits
 * and the message type in the high 3 bits; the sequence id as a varint;
 * then the method name as a string.
 */
public override void WriteMessageBegin(TMessage message)
{
    WriteByteDirect(PROTOCOL_ID);
    WriteByteDirect((byte)((VERSION & VERSION_MASK) | ((((uint)message.Type) << TYPE_SHIFT_AMOUNT) & TYPE_MASK)));
    WriteVarint32((uint)message.SeqID);
    WriteString(message.Name);
}
/**
 * Write a struct begin. Nothing is emitted on the wire; we push the
 * current last field id so each nested struct gets independent field-id
 * delta encoding.
 */
public override void WriteStructBegin(TStruct strct)
{
    lastField_.Push(lastFieldId_);
    lastFieldId_ = 0;
}
/**
 * Write a struct end. Nothing is emitted on the wire; restores the
 * enclosing struct's last field id from the stack so its delta encoding
 * resumes where it left off.
 */
public override void WriteStructEnd()
{
    lastFieldId_ = lastField_.Pop();
}
/**
 * Begin writing a field header. Bool fields are deferred: their value can
 * be folded into the type nibble, so the field is stashed until WriteBool
 * runs. Every other type emits its header immediately (0xFF = no type
 * override).
 */
public override void WriteFieldBegin(TField field)
{
    if (field.Type != TType.Bool)
    {
        WriteFieldBeginInternal(field, 0xFF);
        return;
    }
    // Defer until WriteBool so the boolean value can live in the type nibble.
    booleanField_ = field;
}
/**
 * The workhorse of WriteFieldBegin. It has the option of doing a
 * 'type override' of the type header (typeOverride != 0xFF); this is used
 * specifically for boolean fields, whose value is encoded in the type
 * nibble itself.
 *
 * If the field id is a small forward delta (1..15) from the previous
 * field id, the delta is packed into the upper nibble of the type byte;
 * otherwise the type byte is followed by a zigzag-varint field id.
 */
private void WriteFieldBeginInternal(TField field, byte typeOverride)
{
    // if there's a type override, use that.
    byte typeToWrite = typeOverride == 0xFF ? getCompactType(field.Type) : typeOverride;

    // check if we can use delta encoding for the field id
    if (field.ID > lastFieldId_ && field.ID - lastFieldId_ <= 15)
    {
        // Write them together: delta in the high nibble, type in the low nibble.
        WriteByteDirect((field.ID - lastFieldId_) << 4 | typeToWrite);
    }
    else
    {
        // Write them separate: type byte, then explicit field id.
        WriteByteDirect(typeToWrite);
        WriteI16(field.ID);
    }

    lastFieldId_ = field.ID;
}
/**
 * Write the STOP symbol so readers know there are no more fields in this
 * struct.
 */
public override void WriteFieldStop()
{
    WriteByteDirect(Types.STOP);
}
/**
 * Write a map header: varint element count followed by a byte packing the
 * key type (high nibble) and value type (low nibble). An empty map is a
 * single zero byte — no type information is needed to skip it.
 */
public override void WriteMapBegin(TMap map)
{
    if (map.Count != 0)
    {
        WriteVarint32((uint)map.Count);
        WriteByteDirect(getCompactType(map.KeyType) << 4 | getCompactType(map.ValueType));
        return;
    }
    // Empty map: just the zero size byte.
    WriteByteDirect(0);
}
/**
 * Write a list header. Delegates to the shared collection-header writer
 * with the list's element type and size.
 */
public override void WriteListBegin(TList list)
{
    WriteCollectionBegin(list.ElementType, list.Count);
}
/**
 * Write a set header. Sets share the list wire format; only the type
 * indicator in the enclosing field header differs.
 */
public override void WriteSetBegin(TSet set)
{
    WriteCollectionBegin(set.ElementType, set.Count);
}
/**
 * Write a boolean value. If this bool belongs to a field whose header was
 * deferred by WriteFieldBegin, emit that header now with the value folded
 * into the type nibble. Otherwise (e.g. a container element) write the
 * value as a single byte.
 */
public override void WriteBool(Boolean b)
{
    if (booleanField_ == null)
    {
        // Not part of a field header: one byte on the wire.
        WriteByteDirect(b ? Types.BOOLEAN_TRUE : Types.BOOLEAN_FALSE);
        return;
    }
    // A bool field header is pending; its type nibble carries the value.
    WriteFieldBeginInternal(booleanField_.Value, b ? Types.BOOLEAN_TRUE : Types.BOOLEAN_FALSE);
    booleanField_ = null;
}
/**
 * Write a byte. Nothing to see here!
 */
public override void WriteByte(sbyte b)
{
    WriteByteDirect((byte)b);
}
/**
 * Write an I16 as a zigzag varint (zigzag keeps small-magnitude negative
 * values short on the wire).
 */
public override void WriteI16(short i16)
{
    WriteVarint32(intToZigZag(i16));
}
/**
 * Write an i32 as a zigzag varint.
 */
public override void WriteI32(int i32)
{
    WriteVarint32(intToZigZag(i32));
}
/**
 * Write an i64 as a zigzag varint.
 */
public override void WriteI64(long i64)
{
    WriteVarint64(longToZigzag(i64));
}
/**
 * Write a double to the wire as a fixed 8 bytes: the IEEE-754 bit pattern
 * reinterpreted as an i64 and serialized little-endian.
 */
public override void WriteDouble(double dub)
{
    byte[] scratch = new byte[8];
    fixedLongToBytes(BitConverter.DoubleToInt64Bits(dub), scratch, 0);
    trans.Write(scratch);
}
/**
 * Write a string to the wire: UTF-8 encode it, then emit a varint length
 * prefix followed by the raw bytes.
 */
public override void WriteString(String str)
{
    byte[] encoded = Encoding.UTF8.GetBytes(str);
    WriteBinary(encoded, 0, encoded.Length);
}
/**
 * Write a byte array, using a varint for the size.
 */
public override void WriteBinary(byte[] bin)
{
    WriteBinary(bin, 0, bin.Length);
}
/**
 * Core binary writer: varint length prefix followed by the raw bytes of
 * the given slice.
 */
private void WriteBinary(byte[] data, int start, int count)
{
    WriteVarint32((uint)count);
    trans.Write(data, start, count);
}
//
// These methods are called by generated struct code, but the compact
// encoding needs no explicit terminators for messages, containers, or
// (non-stop) fields, so they are intentionally empty.
//
public override void WriteMessageEnd() { }
public override void WriteMapEnd() { }
public override void WriteListEnd() { }
public override void WriteSetEnd() { }
public override void WriteFieldEnd() { }
//
// Internal writing methods
//
/**
 * Shared header writer for lists and sets (on the wire they differ only
 * by the type indicator). Sizes 0..14 are packed into the upper nibble of
 * the type byte; larger sizes use the 0xF escape nibble followed by a
 * varint size.
 */
protected void WriteCollectionBegin(TType elemType, int size)
{
    if (size > 14)
    {
        WriteByteDirect(0xf0 | getCompactType(elemType));
        WriteVarint32((uint)size);
    }
    else
    {
        WriteByteDirect(size << 4 | getCompactType(elemType));
    }
}
/**
 * Write an i64 as an unsigned LEB128 varint: 7 payload bits per byte,
 * high bit set while more bytes follow. Results in 1-10 bytes on the
 * wire; buffered so only one transport write is issued.
 */
byte[] varint64out = new byte[10];
private void WriteVarint64(ulong n)
{
    int count = 0;
    do
    {
        byte chunk = (byte)(n & 0x7F);
        n >>= 7;
        if (n != 0)
        {
            chunk |= 0x80; // more bytes follow
        }
        varint64out[count++] = chunk;
    } while (n != 0);
    trans.Write(varint64out, 0, count);
}
/**
 * Convert l into a zigzag long: (n << 1) ^ (n >> 63) interleaves positive
 * and negative values (0, -1, 1, -2, ...) so small-magnitude numbers stay
 * small as varints.
 */
private ulong longToZigzag(long n)
{
    return (ulong)(n << 1) ^ (ulong)(n >> 63);
}
/**
 * Convert n into a zigzag int (32-bit counterpart of longToZigzag).
 */
private uint intToZigZag(int n)
{
    return (uint)(n << 1) ^ (uint)(n >> 31);
}
/**
 * Serialize a long into 8 little-endian bytes in buf, starting at off and
 * going until off+7.
 */
private void fixedLongToBytes(long n, byte[] buf, int off)
{
    for (int i = 0; i < 8; i++)
    {
        // (byte) cast truncates to the low 8 bits of the shifted value.
        buf[off + i] = (byte)(n >> (8 * i));
    }
}
#endregion
#region ReadMethods
/**
 * Read and validate a message header: protocol id byte, combined
 * version/type byte, varint sequence id, then the method name.
 *
 * @throws TProtocolException if the protocol id or version does not match.
 */
public override TMessage ReadMessageBegin()
{
    byte pid = (byte)ReadByte();
    if (pid != PROTOCOL_ID)
    {
        throw new TProtocolException("Expected protocol id " + PROTOCOL_ID.ToString("X") + " but got " + pid.ToString("X"));
    }

    byte verAndType = (byte)ReadByte();
    byte ver = (byte)(verAndType & VERSION_MASK);
    if (ver != VERSION)
    {
        throw new TProtocolException("Expected version " + VERSION + " but got " + ver);
    }

    byte msgType = (byte)((verAndType >> TYPE_SHIFT_AMOUNT) & TYPE_BITS);
    int sequenceId = (int)ReadVarint32();
    String name = ReadString();
    return new TMessage(name, (TMessageType)msgType, sequenceId);
}
/**
 * Read a struct begin. There's nothing on the wire for this; we push the
 * current last field id so the nested struct's field-id deltas are
 * tracked independently.
 */
public override TStruct ReadStructBegin()
{
    lastField_.Push(lastFieldId_);
    lastFieldId_ = 0;
    return ANONYMOUS_STRUCT;
}
/**
 * Doesn't consume any wire data; restores the enclosing struct's last
 * field id from the stack.
 */
public override void ReadStructEnd()
{
    // consume the last field we Read off the wire.
    lastFieldId_ = lastField_.Pop();
}
/**
 * Read a field header off the wire. The low nibble of the first byte is
 * the compact type (STOP short-circuits the struct); the high nibble is
 * either a field-id delta (1..15) from the previous field, or 0 meaning
 * an explicit zigzag-varint field id follows. Bool fields carry their
 * value in the type nibble, which is stashed for the next ReadBool.
 */
public override TField ReadFieldBegin()
{
    byte type = (byte)ReadByte();

    // if it's a stop, then we can return immediately, as the struct is over.
    if (type == Types.STOP)
    {
        return TSTOP;
    }

    short fieldId;

    // mask off the 4 MSB of the type header. it could contain a field id delta.
    short modifier = (short)((type & 0xf0) >> 4);
    if (modifier == 0)
    {
        // not a delta. look ahead for the zigzag varint field id.
        fieldId = ReadI16();
    }
    else
    {
        // has a delta. add the delta to the last Read field id.
        fieldId = (short)(lastFieldId_ + modifier);
    }

    TField field = new TField("", getTType((byte)(type & 0x0f)), fieldId);

    // if this happens to be a boolean field, the value is encoded in the type
    if (isBoolType(type))
    {
        // save the boolean value in a special instance variable for ReadBool.
        boolValue_ = (byte)(type & 0x0f) == Types.BOOLEAN_TRUE;
    }

    // push the new field onto the field stack so we can keep the deltas going.
    lastFieldId_ = field.ID;
    return field;
}
/**
 * Read a map header: varint size, then (only for non-empty maps) a byte
 * packing key type (high nibble) and value type (low nibble). A 0-length
 * map therefore yields a TMap without the "correct" element types.
 */
public override TMap ReadMapBegin()
{
    int size = (int)ReadVarint32();
    // Empty maps omit the key/value type byte entirely.
    byte kvType = size == 0 ? (byte)0 : (byte)ReadByte();
    TType keyType = getTType((byte)(kvType >> 4));
    TType valueType = getTType((byte)(kvType & 0xf));
    return new TMap(keyType, valueType, size);
}
/**
 * Read a list header. Sizes 0-14 are packed into the upper nibble of the
 * element-type byte; the escape nibble 0xF means the true size follows as
 * a varint.
 */
public override TList ReadListBegin()
{
    byte header = (byte)ReadByte();
    int count = (header >> 4) & 0x0f;
    if (count == 15)
    {
        // Escape nibble: the real size follows as a varint.
        count = (int)ReadVarint32();
    }
    return new TList(getTType(header), count);
}
/**
 * Read a set header off the wire. Sets share the list wire format, so
 * this simply wraps ReadListBegin's result in a TSet.
 */
public override TSet ReadSetBegin()
{
    return new TSet(ReadListBegin());
}
/**
 * Read a boolean. If this is a bool field, the value was already captured
 * from the type nibble during ReadFieldBegin, so consume that pre-stored
 * value; otherwise read a full byte off the wire.
 */
public override Boolean ReadBool()
{
    if (boolValue_ == null)
    {
        // Standalone bool (e.g. container element): one byte on the wire.
        return ReadByte() == Types.BOOLEAN_TRUE;
    }
    bool pending = boolValue_.Value;
    boolValue_ = null;
    return pending;
}
// Reusable 1-byte scratch buffer so ReadByte does not allocate per call.
byte[] byteRawBuf = new byte[1];
/**
 * Read a single byte off the wire. Nothing interesting here.
 */
public override sbyte ReadByte()
{
    trans.ReadAll(byteRawBuf, 0, 1);
    return (sbyte)byteRawBuf[0];
}
/**
 * Read an i16 from the wire as a zigzag varint.
 */
public override short ReadI16()
{
    return (short)zigzagToInt(ReadVarint32());
}
/**
 * Read an i32 from the wire as a zigzag varint.
 */
public override int ReadI32()
{
    return zigzagToInt(ReadVarint32());
}
/**
 * Read an i64 from the wire as a zigzag varint.
 */
public override long ReadI64()
{
    return zigzagToLong(ReadVarint64());
}
/**
 * Read a double: 8 fixed little-endian bytes reassembled into an i64 and
 * reinterpreted as an IEEE-754 double.
 */
public override double ReadDouble()
{
    byte[] raw = new byte[8];
    trans.ReadAll(raw, 0, 8);
    return BitConverter.Int64BitsToDouble(bytesToLong(raw));
}
/**
 * Reads a varint length prefix and that many bytes (via ReadBinary), then
 * UTF-8 decodes them. A zero length short-circuits to the empty string.
 */
public override String ReadString()
{
    int size = (int)ReadVarint32();
    return size == 0 ? "" : Encoding.UTF8.GetString(ReadBinary(size));
}
/**
 * Read a byte[] from the wire: varint length prefix, then the raw bytes.
 */
public override byte[] ReadBinary()
{
    // Delegate to the known-length reader rather than duplicating the
    // empty-array shortcut and ReadAll logic here.
    return ReadBinary((int)ReadVarint32());
}
/**
 * Read a byte[] of a known length from the wire. Returns an empty array
 * for length 0 without touching the transport.
 */
private byte[] ReadBinary(int length)
{
    if (length == 0) return new byte[0];

    byte[] buf = new byte[length];
    trans.ReadAll(buf, 0, length);
    return buf;
}
//
// These methods are here for generated struct code to call, but the
// compact encoding has no terminators for messages, fields, or
// containers, so there is nothing to consume.
//
public override void ReadMessageEnd() { }
public override void ReadFieldEnd() { }
public override void ReadMapEnd() { }
public override void ReadListEnd() { }
public override void ReadSetEnd() { }
//
// Internal Reading methods
//
/**
 * Read an i32 varint (unsigned LEB128): each byte carries 7 payload bits
 * and its MSB is set while more bytes follow. Consumes up to 5 bytes.
 */
private uint ReadVarint32()
{
    uint value = 0;
    for (int shift = 0; ; shift += 7)
    {
        byte b = (byte)ReadByte();
        value |= (uint)(b & 0x7f) << shift;
        if ((b & 0x80) == 0)
        {
            return value;
        }
    }
}
/**
 * Read an i64 varint (unsigned LEB128): each byte carries 7 payload bits
 * and its MSB is set while more bytes follow. Consumes up to 10 bytes.
 */
private ulong ReadVarint64()
{
    ulong value = 0;
    for (int shift = 0; ; shift += 7)
    {
        byte b = (byte)ReadByte();
        value |= (ulong)(b & 0x7f) << shift;
        if ((b & 0x80) == 0)
        {
            return value;
        }
    }
}
#endregion
//
// encoding helpers
//
/**
 * Decode a zigzag-encoded int: even values map back to n/2 and odd values
 * to -(n/2)-1, inverting intToZigZag.
 */
private int zigzagToInt(uint n)
{
    return (int)(n >> 1) ^ (-(int)(n & 1));
}
/**
 * Decode a zigzag-encoded long (64-bit counterpart of zigzagToInt).
 */
private long zigzagToLong(ulong n)
{
    return (long)(n >> 1) ^ (-(long)(n & 1));
}
/**
 * Reassemble 8 little-endian bytes into a long. The 0xffL mask keeps each
 * byte from sign-extending when widened; folding from index 7 down to 0
 * places byte i at bit position 8*i.
 */
private long bytesToLong(byte[] bytes)
{
    long value = 0;
    for (int i = 7; i >= 0; i--)
    {
        value = (value << 8) | (bytes[i] & 0xffL);
    }
    return value;
}
//
// type testing and converting
//
/**
 * True when the low nibble of a field-type byte encodes a boolean; for
 * bool fields the value itself is stored in that nibble.
 */
private Boolean isBoolType(byte b)
{
    switch (b & 0x0f)
    {
        case Types.BOOLEAN_TRUE:
        case Types.BOOLEAN_FALSE:
            return true;
        default:
            return false;
    }
}
/**
 * Given a TCompactProtocol.Types constant (the low nibble of a field or
 * collection header byte), convert it to its corresponding TType value.
 * Both boolean wire types collapse to TType.Bool.
 *
 * @throws TProtocolException for unrecognized type nibbles.
 */
private TType getTType(byte type)
{
    switch ((byte)(type & 0x0f))
    {
        case Types.STOP:
            return TType.Stop;
        case Types.BOOLEAN_FALSE:
        case Types.BOOLEAN_TRUE:
            return TType.Bool;
        case Types.BYTE:
            return TType.Byte;
        case Types.I16:
            return TType.I16;
        case Types.I32:
            return TType.I32;
        case Types.I64:
            return TType.I64;
        case Types.DOUBLE:
            return TType.Double;
        case Types.BINARY:
            return TType.String;
        case Types.LIST:
            return TType.List;
        case Types.SET:
            return TType.Set;
        case Types.MAP:
            return TType.Map;
        case Types.STRUCT:
            return TType.Struct;
        default:
            throw new TProtocolException("don't know what type: " + (byte)(type & 0x0f));
    }
}
/**
 * Given a TType value, find the appropriate TCompactProtocol.Types
 * constant via the lookup table built in the constructor.
 */
private byte getCompactType(TType ttype)
{
    return ttypeToCompactType[(int)ttype];
}
}
}
| 1 | 14,271 | Why are we doing that change? | apache-thrift | c |
@@ -220,7 +220,7 @@ public class ErrorHandler {
private Throwable rebuildServerError(Map<String, Object> rawErrorData, int responseStatus) {
- if (!rawErrorData.containsKey(CLASS) && !rawErrorData.containsKey(STACK_TRACE)) {
+ if (rawErrorData.get(CLASS) == null || rawErrorData.get(STACK_TRACE) == null) {
// Not enough information for us to try to rebuild an error.
return null;
} | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.remote;
import static org.openqa.selenium.remote.ErrorCodes.SUCCESS;
import com.google.common.base.Throwables;
import com.google.common.primitives.Ints;
import org.openqa.selenium.UnhandledAlertException;
import org.openqa.selenium.WebDriverException;
import java.lang.reflect.Constructor;
import java.math.BigDecimal;
import java.math.RoundingMode;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.function.Function;
/**
* Maps exceptions to status codes for sending over the wire.
*/
public class ErrorHandler {
private static final String MESSAGE = "message";
private static final String SCREEN_SHOT = "screen";
private static final String CLASS = "class";
private static final String STACK_TRACE = "stackTrace";
private static final String LINE_NUMBER = "lineNumber";
private static final String METHOD_NAME = "methodName";
private static final String CLASS_NAME = "className";
private static final String FILE_NAME = "fileName";
private static final String UNKNOWN_CLASS = "<anonymous class>";
private static final String UNKNOWN_METHOD = "<anonymous method>";
private static final String UNKNOWN_FILE = null;
private ErrorCodes errorCodes;
private boolean includeServerErrors;
public ErrorHandler() {
this(true);
}
/**
* @param includeServerErrors Whether to include server-side details in thrown exceptions if the
* information is available.
*/
public ErrorHandler(boolean includeServerErrors) {
this.includeServerErrors = includeServerErrors;
this.errorCodes = new ErrorCodes();
}
/**
* @param includeServerErrors Whether to include server-side details in thrown exceptions if the
* information is available.
* @param codes The ErrorCodes object to use for linking error codes to exceptions.
*/
public ErrorHandler(ErrorCodes codes, boolean includeServerErrors) {
this.includeServerErrors = includeServerErrors;
this.errorCodes = codes;
}
public boolean isIncludeServerErrors() {
return includeServerErrors;
}
public void setIncludeServerErrors(boolean includeServerErrors) {
this.includeServerErrors = includeServerErrors;
}
@SuppressWarnings("unchecked")
public Response throwIfResponseFailed(Response response, long duration) throws RuntimeException {
if (response.getStatus() == null || response.getStatus() == SUCCESS) {
return response;
}
if (response.getValue() instanceof Throwable) {
Throwable throwable = (Throwable) response.getValue();
Throwables.throwIfUnchecked(throwable);
throw new RuntimeException(throwable);
}
Class<? extends WebDriverException> outerErrorType =
errorCodes.getExceptionType(response.getStatus());
Object value = response.getValue();
String message = null;
Throwable cause = null;
if (value instanceof Map) {
Map<String, Object> rawErrorData = (Map<String, Object>) value;
if (!rawErrorData.containsKey(MESSAGE) && rawErrorData.containsKey("value")) {
try {
rawErrorData = (Map<String, Object>) rawErrorData.get("value");
} catch (ClassCastException cce) {}
}
try {
message = (String) rawErrorData.get(MESSAGE);
} catch (ClassCastException e) {
// Ok, try to recover gracefully.
message = String.valueOf(e);
}
Throwable serverError = rebuildServerError(rawErrorData, response.getStatus());
// If serverError is null, then the server did not provide a className (only expected if
// the server is a Java process) or a stack trace. The lack of a className is OK, but
// not having a stacktrace really hurts our ability to debug problems.
if (serverError == null) {
if (includeServerErrors) {
// TODO: this should probably link to a wiki article with more info.
message += " (WARNING: The server did not provide any stacktrace information)";
}
} else if (!includeServerErrors) {
// TODO: wiki article with more info.
message += " (WARNING: The client has suppressed server-side stacktraces)";
} else {
cause = serverError;
}
if (rawErrorData.get(SCREEN_SHOT) != null) {
cause = new ScreenshotException(String.valueOf(rawErrorData.get(SCREEN_SHOT)), cause);
}
} else if (value != null) {
message = String.valueOf(value);
}
String duration1 = duration(duration);
if (message != null && !message.contains(duration1)) {
message = message + duration1;
}
WebDriverException toThrow = null;
if (outerErrorType.equals(UnhandledAlertException.class)
&& value instanceof Map) {
toThrow = createUnhandledAlertException(value);
}
if (toThrow == null) {
toThrow = createThrowable(outerErrorType,
new Class<?>[] {String.class, Throwable.class, Integer.class},
new Object[] {message, cause, response.getStatus()});
}
if (toThrow == null) {
toThrow = createThrowable(outerErrorType,
new Class<?>[] {String.class, Throwable.class},
new Object[] {message, cause});
}
if (toThrow == null) {
toThrow = createThrowable(outerErrorType,
new Class<?>[] {String.class},
new Object[] {message});
}
if (toThrow == null) {
toThrow = new WebDriverException(message, cause);
}
throw toThrow;
}
@SuppressWarnings("unchecked")
private UnhandledAlertException createUnhandledAlertException(Object value) {
Map<String, Object> rawErrorData = (Map<String, Object>) value;
if (rawErrorData.containsKey("alert") || rawErrorData.containsKey("alertText")) {
Object alertText = rawErrorData.get("alertText");
if (alertText == null) {
Map<String, Object> alert = (Map<String, Object>) rawErrorData.get("alert");
if (alert != null) {
alertText = alert.get("text");
}
}
return createThrowable(UnhandledAlertException.class,
new Class<?>[] {String.class, String.class},
new Object[] {rawErrorData.get("message"), alertText});
}
return null;
}
private String duration(long duration) {
String prefix = "\nCommand duration or timeout: ";
if (duration < 1000) {
return prefix + duration + " milliseconds";
}
return prefix + (new BigDecimal(duration).divide(new BigDecimal(1000)).setScale(2, RoundingMode.HALF_UP)) + " seconds";
}
private <T extends Throwable> T createThrowable(
Class<T> clazz, Class<?>[] parameterTypes, Object[] parameters) {
try {
Constructor<T> constructor = clazz.getConstructor(parameterTypes);
return constructor.newInstance(parameters);
} catch (OutOfMemoryError | ReflectiveOperationException e) {
// Do nothing - fall through.
}
return null;
}
private Throwable rebuildServerError(Map<String, Object> rawErrorData, int responseStatus) {
if (!rawErrorData.containsKey(CLASS) && !rawErrorData.containsKey(STACK_TRACE)) {
// Not enough information for us to try to rebuild an error.
return null;
}
Throwable toReturn = null;
String message = (String) rawErrorData.get(MESSAGE);
Class<?> clazz = null;
// First: allow Remote Driver to specify the Selenium Server internal exception
if (rawErrorData.containsKey(CLASS)) {
String className = (String) rawErrorData.get(CLASS);
try {
clazz = Class.forName(className);
} catch (ClassNotFoundException ignored) {
// Ok, fall-through
}
}
// If the above fails, map Response Status to Exception class
if (null == clazz) {
clazz = errorCodes.getExceptionType(responseStatus);
}
if (clazz.equals(UnhandledAlertException.class)) {
toReturn = createUnhandledAlertException(rawErrorData);
} else if (Throwable.class.isAssignableFrom(clazz)) {
@SuppressWarnings({"unchecked"})
Class<? extends Throwable> throwableType = (Class<? extends Throwable>) clazz;
toReturn = createThrowable(
throwableType,
new Class<?>[] {String.class},
new Object[] {message});
}
if (toReturn == null) {
toReturn = new UnknownServerException(message);
}
// Note: if we have a class name above, we should always have a stack trace.
// The inverse is not always true.
StackTraceElement[] stackTrace = new StackTraceElement[0];
if (rawErrorData.containsKey(STACK_TRACE)) {
@SuppressWarnings({"unchecked"})
List<Map<String, Object>> stackTraceInfo =
(List<Map<String, Object>>) rawErrorData.get(STACK_TRACE);
stackTrace = stackTraceInfo.stream()
.map(entry -> new FrameInfoToStackFrame().apply(entry))
.filter(Objects::nonNull)
.toArray(StackTraceElement[]::new);
}
toReturn.setStackTrace(stackTrace);
return toReturn;
}
/**
* Exception used as a place holder if the server returns an error without a stack trace.
*/
public static class UnknownServerException extends WebDriverException {
private UnknownServerException(String s) {
super(s);
}
}
/**
* Function that can rebuild a {@link StackTraceElement} from the frame info included with a
* WebDriver JSON response.
*/
private static class FrameInfoToStackFrame
implements Function<Map<String, Object>, StackTraceElement> {
public StackTraceElement apply(Map<String, Object> frameInfo) {
if (frameInfo == null) {
return null;
}
Optional<Number> maybeLineNumberInteger = Optional.empty();
final Object lineNumberObject = frameInfo.get(LINE_NUMBER);
if (lineNumberObject instanceof Number) {
maybeLineNumberInteger = Optional.of((Number) lineNumberObject);
} else if (lineNumberObject != null) {
// might be a Number as a String
maybeLineNumberInteger = Optional.ofNullable(Ints.tryParse(lineNumberObject.toString()));
}
// default -1 for unknown, see StackTraceElement constructor javadoc
final int lineNumber = maybeLineNumberInteger.orElse(-1).intValue();
// Gracefully handle remote servers that don't (or can't) send back
// complete stack trace info. At least some of this information should
// be included...
String className = frameInfo.containsKey(CLASS_NAME) ?
toStringOrNull(frameInfo.get(CLASS_NAME)) : UNKNOWN_CLASS;
String methodName = frameInfo.containsKey(METHOD_NAME) ?
toStringOrNull(frameInfo.get(METHOD_NAME)) : UNKNOWN_METHOD;
String fileName = frameInfo.containsKey(FILE_NAME) ?
toStringOrNull(frameInfo.get(FILE_NAME)) : UNKNOWN_FILE;
return new StackTraceElement(
className,
methodName,
fileName,
lineNumber);
}
private static String toStringOrNull(Object o) {
return o == null ? null : o.toString();
}
}
}
| 1 | 15,006 | Why && changed to || ? | SeleniumHQ-selenium | py |
@@ -196,6 +196,15 @@ def _unpickle(filepath, serializer):
raise ValueError(f'Unrecognized serializer type: {serializer}')
+def collection_to_single_partition(collection):
+ """Merge the parts of a Dask collection into a single partition."""
+ if collection is None:
+ return
+ if isinstance(collection, da.Array):
+ return collection.rechunk(*collection.shape)
+ return collection.repartition(npartitions=1)
+
+
@pytest.mark.parametrize('output', data_output)
@pytest.mark.parametrize('centers', data_centers)
def test_classifier(output, centers, client, listen_port): | 1 | # coding: utf-8
"""Tests for lightgbm.dask module"""
import inspect
import pickle
import socket
from itertools import groupby
from os import getenv
from sys import platform
import lightgbm as lgb
import pytest
if not platform.startswith('linux'):
pytest.skip('lightgbm.dask is currently supported in Linux environments', allow_module_level=True)
if not lgb.compat.DASK_INSTALLED:
pytest.skip('Dask is not installed', allow_module_level=True)
import cloudpickle
import dask.array as da
import dask.dataframe as dd
import joblib
import numpy as np
import pandas as pd
from scipy.stats import spearmanr
from dask.array.utils import assert_eq
from dask.distributed import default_client, Client, LocalCluster, wait
from distributed.utils_test import client, cluster_fixture, gen_cluster, loop
from scipy.sparse import csr_matrix
from sklearn.datasets import make_blobs, make_regression
from .utils import make_ranking
# time, in seconds, to wait for the Dask client to close. Used to avoid teardown errors
# see https://distributed.dask.org/en/latest/api.html#distributed.Client.close
CLIENT_CLOSE_TIMEOUT = 120
tasks = ['classification', 'regression', 'ranking']
data_output = ['array', 'scipy_csr_matrix', 'dataframe', 'dataframe-with-categorical']
data_centers = [[[-4, -4], [4, 4]], [[-4, -4], [4, 4], [-4, 4]]]
group_sizes = [5, 5, 5, 10, 10, 10, 20, 20, 20, 50, 50]
pytestmark = [
pytest.mark.skipif(getenv('TASK', '') == 'mpi', reason='Fails to run with MPI interface'),
pytest.mark.skipif(getenv('TASK', '') == 'gpu', reason='Fails to run with GPU interface')
]
@pytest.fixture()
def listen_port():
listen_port.port += 10
return listen_port.port
listen_port.port = 13000
def _create_ranking_data(n_samples=100, output='array', chunk_size=50, **kwargs):
X, y, g = make_ranking(n_samples=n_samples, random_state=42, **kwargs)
rnd = np.random.RandomState(42)
w = rnd.rand(X.shape[0]) * 0.01
g_rle = np.array([len(list(grp)) for _, grp in groupby(g)])
if output.startswith('dataframe'):
# add target, weight, and group to DataFrame so that partitions abide by group boundaries.
X_df = pd.DataFrame(X, columns=[f'feature_{i}' for i in range(X.shape[1])])
if output == 'dataframe-with-categorical':
for i in range(5):
col_name = "cat_col" + str(i)
cat_values = rnd.choice(['a', 'b'], X.shape[0])
cat_series = pd.Series(
cat_values,
dtype='category'
)
X_df[col_name] = cat_series
X = X_df.copy()
X_df = X_df.assign(y=y, g=g, w=w)
# set_index ensures partitions are based on group id.
# See https://stackoverflow.com/questions/49532824/dask-dataframe-split-partitions-based-on-a-column-or-function.
X_df.set_index('g', inplace=True)
dX = dd.from_pandas(X_df, chunksize=chunk_size)
# separate target, weight from features.
dy = dX['y']
dw = dX['w']
dX = dX.drop(columns=['y', 'w'])
dg = dX.index.to_series()
# encode group identifiers into run-length encoding, the format LightGBMRanker is expecting
# so that within each partition, sum(g) = n_samples.
dg = dg.map_partitions(lambda p: p.groupby('g', sort=False).apply(lambda z: z.shape[0]))
elif output == 'array':
# ranking arrays: one chunk per group. Each chunk must include all columns.
p = X.shape[1]
dX, dy, dw, dg = [], [], [], []
for g_idx, rhs in enumerate(np.cumsum(g_rle)):
lhs = rhs - g_rle[g_idx]
dX.append(da.from_array(X[lhs:rhs, :], chunks=(rhs - lhs, p)))
dy.append(da.from_array(y[lhs:rhs]))
dw.append(da.from_array(w[lhs:rhs]))
dg.append(da.from_array(np.array([g_rle[g_idx]])))
dX = da.concatenate(dX, axis=0)
dy = da.concatenate(dy, axis=0)
dw = da.concatenate(dw, axis=0)
dg = da.concatenate(dg, axis=0)
else:
raise ValueError('Ranking data creation only supported for Dask arrays and dataframes')
return X, y, w, g_rle, dX, dy, dw, dg
def _create_data(objective, n_samples=100, centers=2, output='array', chunk_size=50):
if objective == 'classification':
X, y = make_blobs(n_samples=n_samples, centers=centers, random_state=42)
elif objective == 'regression':
X, y = make_regression(n_samples=n_samples, random_state=42)
else:
raise ValueError("Unknown objective '%s'" % objective)
rnd = np.random.RandomState(42)
weights = rnd.random(X.shape[0]) * 0.01
if output == 'array':
dX = da.from_array(X, (chunk_size, X.shape[1]))
dy = da.from_array(y, chunk_size)
dw = da.from_array(weights, chunk_size)
elif output.startswith('dataframe'):
X_df = pd.DataFrame(X, columns=['feature_%d' % i for i in range(X.shape[1])])
if output == 'dataframe-with-categorical':
num_cat_cols = 5
for i in range(num_cat_cols):
col_name = "cat_col" + str(i)
cat_values = rnd.choice(['a', 'b'], X.shape[0])
cat_series = pd.Series(
cat_values,
dtype='category'
)
X_df[col_name] = cat_series
X = np.hstack((X, cat_series.cat.codes.values.reshape(-1, 1)))
# for the small data sizes used in tests, it's hard to get LGBMRegressor to choose
# categorical features for splits. So for regression tests with categorical features,
# _create_data() returns a DataFrame with ONLY categorical features
if objective == 'regression':
cat_cols = [col for col in X_df.columns if col.startswith('cat_col')]
X_df = X_df[cat_cols]
X = X[:, -num_cat_cols:]
y_df = pd.Series(y, name='target')
dX = dd.from_pandas(X_df, chunksize=chunk_size)
dy = dd.from_pandas(y_df, chunksize=chunk_size)
dw = dd.from_array(weights, chunksize=chunk_size)
elif output == 'scipy_csr_matrix':
dX = da.from_array(X, chunks=(chunk_size, X.shape[1])).map_blocks(csr_matrix)
dy = da.from_array(y, chunks=chunk_size)
dw = da.from_array(weights, chunk_size)
else:
raise ValueError("Unknown output type '%s'" % output)
return X, y, weights, dX, dy, dw
def _r2_score(dy_true, dy_pred):
numerator = ((dy_true - dy_pred) ** 2).sum(axis=0, dtype=np.float64)
denominator = ((dy_true - dy_pred.mean(axis=0)) ** 2).sum(axis=0, dtype=np.float64)
return (1 - numerator / denominator).compute()
def _accuracy_score(dy_true, dy_pred):
    """Fraction of exactly-matching labels, materialized to a concrete value."""
    matches = dy_true == dy_pred
    return da.average(matches).compute()
def _pickle(obj, filepath, serializer):
if serializer == 'pickle':
with open(filepath, 'wb') as f:
pickle.dump(obj, f)
elif serializer == 'joblib':
joblib.dump(obj, filepath)
elif serializer == 'cloudpickle':
with open(filepath, 'wb') as f:
cloudpickle.dump(obj, f)
else:
raise ValueError(f'Unrecognized serializer type: {serializer}')
def _unpickle(filepath, serializer):
if serializer == 'pickle':
with open(filepath, 'rb') as f:
return pickle.load(f)
elif serializer == 'joblib':
return joblib.load(filepath)
elif serializer == 'cloudpickle':
with open(filepath, 'rb') as f:
return cloudpickle.load(f)
else:
raise ValueError(f'Unrecognized serializer type: {serializer}')
@pytest.mark.parametrize('output', data_output)
@pytest.mark.parametrize('centers', data_centers)
def test_classifier(output, centers, client, listen_port):
    """Distributed classifier should match a locally-trained LGBMClassifier.

    Trains DaskLGBMClassifier and LGBMClassifier on the same data, then
    compares accuracy, predicted labels, predicted probabilities, the
    to_local() model, and the shape/values of pred_leaf output.
    """
    X, y, w, dX, dy, dw = _create_data(
        objective='classification',
        output=output,
        centers=centers
    )
    params = {
        "n_estimators": 10,
        "num_leaves": 10
    }
    # explicitly mark the generated 'cat_col*' columns as categorical features
    if output == 'dataframe-with-categorical':
        params["categorical_feature"] = [
            i for i, col in enumerate(dX.columns) if col.startswith('cat_')
        ]
    dask_classifier = lgb.DaskLGBMClassifier(
        client=client,
        time_out=5,
        local_listen_port=listen_port,
        **params
    )
    dask_classifier = dask_classifier.fit(dX, dy, sample_weight=dw)
    p1 = dask_classifier.predict(dX)
    p1_proba = dask_classifier.predict_proba(dX).compute()
    p1_pred_leaf = dask_classifier.predict(dX, pred_leaf=True)
    p1_local = dask_classifier.to_local().predict(X)
    # score while p1 is still a lazy dask collection, then materialize it
    s1 = _accuracy_score(dy, p1)
    p1 = p1.compute()
    local_classifier = lgb.LGBMClassifier(**params)
    local_classifier.fit(X, y, sample_weight=w)
    p2 = local_classifier.predict(X)
    p2_proba = local_classifier.predict_proba(X)
    s2 = local_classifier.score(X, y)
    assert_eq(s1, s2)
    assert_eq(p1, p2)
    assert_eq(y, p1)
    assert_eq(y, p2)
    # probabilities from distributed training may differ somewhat from local
    # training, hence the loose tolerance
    assert_eq(p1_proba, p2_proba, atol=0.3)
    assert_eq(p1_local, p2)
    assert_eq(y, p1_local)
    # pref_leaf values should have the right shape
    # and values that look like valid tree nodes
    pred_leaf_vals = p1_pred_leaf.compute()
    assert pred_leaf_vals.shape == (
        X.shape[0],
        dask_classifier.booster_.num_trees()
    )
    assert np.max(pred_leaf_vals) <= params['num_leaves']
    assert np.min(pred_leaf_vals) >= 0
    assert len(np.unique(pred_leaf_vals)) <= params['num_leaves']
    # be sure LightGBM actually used at least one categorical column,
    # and that it was correctly treated as a categorical feature
    if output == 'dataframe-with-categorical':
        cat_cols = [
            col for col in dX.columns
            if dX.dtypes[col].name == 'category'
        ]
        tree_df = dask_classifier.booster_.trees_to_dataframe()
        node_uses_cat_col = tree_df['split_feature'].isin(cat_cols)
        assert node_uses_cat_col.sum() > 0
        # '==' decision type indicates a categorical split in the tree dump
        assert tree_df.loc[node_uses_cat_col, "decision_type"].unique()[0] == '=='
    client.close(timeout=CLIENT_CLOSE_TIMEOUT)
@pytest.mark.parametrize('output', data_output)
@pytest.mark.parametrize('centers', data_centers)
def test_classifier_pred_contrib(output, centers, client, listen_port):
    """predict(pred_contrib=True) on the Dask classifier should produce output
    of the same shape as local training, with constant base values in the
    expected column positions.
    """
    X, y, w, dX, dy, dw = _create_data(
        objective='classification',
        output=output,
        centers=centers
    )
    params = {
        "n_estimators": 10,
        "num_leaves": 10
    }
    # explicitly mark the generated 'cat_col*' columns as categorical features
    if output == 'dataframe-with-categorical':
        params["categorical_feature"] = [
            i for i, col in enumerate(dX.columns) if col.startswith('cat_')
        ]
    dask_classifier = lgb.DaskLGBMClassifier(
        client=client,
        time_out=5,
        local_listen_port=listen_port,
        tree_learner='data',
        **params
    )
    dask_classifier = dask_classifier.fit(dX, dy, sample_weight=dw)
    preds_with_contrib = dask_classifier.predict(dX, pred_contrib=True).compute()
    local_classifier = lgb.LGBMClassifier(**params)
    local_classifier.fit(X, y, sample_weight=w)
    local_preds_with_contrib = local_classifier.predict(X, pred_contrib=True)
    if output == 'scipy_csr_matrix':
        preds_with_contrib = np.array(preds_with_contrib.todense())
    # be sure LightGBM actually used at least one categorical column,
    # and that it was correctly treated as a categorical feature
    if output == 'dataframe-with-categorical':
        cat_cols = [
            col for col in dX.columns
            if dX.dtypes[col].name == 'category'
        ]
        tree_df = dask_classifier.booster_.trees_to_dataframe()
        node_uses_cat_col = tree_df['split_feature'].isin(cat_cols)
        assert node_uses_cat_col.sum() > 0
        assert tree_df.loc[node_uses_cat_col, "decision_type"].unique()[0] == '=='
    # shape depends on whether it is binary or multiclass classification
    num_features = dask_classifier.n_features_
    num_classes = dask_classifier.n_classes_
    if num_classes == 2:
        expected_num_cols = num_features + 1
    else:
        expected_num_cols = (num_features + 1) * num_classes
    # * shape depends on whether it is binary or multiclass classification
    # * matrix for binary classification is of the form [feature_contrib, base_value],
    #   for multi-class it's [feat_contrib_class1, base_value_class1, feat_contrib_class2, base_value_class2, etc.]
    # * contrib outputs for distributed training are different than from local training, so we can just test
    #   that the output has the right shape and base values are in the right position
    assert preds_with_contrib.shape[1] == expected_num_cols
    assert preds_with_contrib.shape == local_preds_with_contrib.shape
    # each base-value column should be constant across rows, i.e. contain
    # exactly one unique value. BUGFIX: the original wrote
    # 'len(np.unique(col) == 1)', which is always truthy; the closing paren
    # belongs after np.unique().
    if num_classes == 2:
        assert len(np.unique(preds_with_contrib[:, num_features])) == 1
    else:
        for i in range(num_classes):
            base_value_col = num_features * (i + 1) + i
            assert len(np.unique(preds_with_contrib[:, base_value_col])) == 1
    client.close(timeout=CLIENT_CLOSE_TIMEOUT)
def test_training_does_not_fail_on_port_conflicts(client):
    """Training should still succeed when local_listen_port is already bound.

    A socket is held open on port 12400 for the duration of the test, so the
    estimator must cope with the conflict; fitting is repeated several times
    to exercise that path more than once.
    """
    _, _, _, dX, dy, dw = _create_data('classification', output='array')
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind(('127.0.0.1', 12400))
        dask_classifier = lgb.DaskLGBMClassifier(
            client=client,
            time_out=5,
            local_listen_port=12400,
            n_estimators=5,
            num_leaves=5
        )
        for _ in range(5):
            dask_classifier.fit(
                X=dX,
                y=dy,
                sample_weight=dw,
            )
            assert dask_classifier.booster_
    client.close(timeout=CLIENT_CLOSE_TIMEOUT)
@pytest.mark.parametrize('output', data_output)
def test_regressor(output, client, listen_port):
    """Distributed regressor should roughly match a locally-trained LGBMRegressor.

    Compares R^2 scores, predictions, the to_local() model, and pred_leaf
    output shape/values.
    """
    X, y, w, dX, dy, dw = _create_data(
        objective='regression',
        output=output
    )
    params = {
        "random_state": 42,
        "num_leaves": 10
    }
    # explicitly mark the generated 'cat_col*' columns as categorical features
    if output == 'dataframe-with-categorical':
        params["categorical_feature"] = [
            i for i, col in enumerate(dX.columns) if col.startswith('cat_')
        ]
    # NOTE(review): 'tree' is presumably being used as a LightGBM alias for
    # 'tree_learner' here -- confirm; other tests in this module spell it out.
    dask_regressor = lgb.DaskLGBMRegressor(
        client=client,
        time_out=5,
        local_listen_port=listen_port,
        tree='data',
        **params
    )
    dask_regressor = dask_regressor.fit(dX, dy, sample_weight=dw)
    p1 = dask_regressor.predict(dX)
    p1_pred_leaf = dask_regressor.predict(dX, pred_leaf=True)
    # R^2 is only computed for non-dataframe outputs -- presumably because
    # _r2_score() expects dask.array inputs; confirm
    if not output.startswith('dataframe'):
        s1 = _r2_score(dy, p1)
    p1 = p1.compute()
    p1_local = dask_regressor.to_local().predict(X)
    s1_local = dask_regressor.to_local().score(X, y)
    local_regressor = lgb.LGBMRegressor(**params)
    local_regressor.fit(X, y, sample_weight=w)
    s2 = local_regressor.score(X, y)
    p2 = local_regressor.predict(X)
    # Scores should be the same
    if not output.startswith('dataframe'):
        assert_eq(s1, s2, atol=.01)
        assert_eq(s1, s1_local, atol=.003)
    # Predictions should be roughly the same.
    assert_eq(p1, p1_local)
    # pref_leaf values should have the right shape
    # and values that look like valid tree nodes
    pred_leaf_vals = p1_pred_leaf.compute()
    assert pred_leaf_vals.shape == (
        X.shape[0],
        dask_regressor.booster_.num_trees()
    )
    assert np.max(pred_leaf_vals) <= params['num_leaves']
    assert np.min(pred_leaf_vals) >= 0
    assert len(np.unique(pred_leaf_vals)) <= params['num_leaves']
    # The checks below are skipped
    # for the categorical data case because it's difficult to get
    # a good fit from just categoricals for a regression problem
    # with small data
    if output != 'dataframe-with-categorical':
        assert_eq(y, p1, rtol=1., atol=100.)
        assert_eq(y, p2, rtol=1., atol=50.)
    # be sure LightGBM actually used at least one categorical column,
    # and that it was correctly treated as a categorical feature
    if output == 'dataframe-with-categorical':
        cat_cols = [
            col for col in dX.columns
            if dX.dtypes[col].name == 'category'
        ]
        tree_df = dask_regressor.booster_.trees_to_dataframe()
        node_uses_cat_col = tree_df['split_feature'].isin(cat_cols)
        assert node_uses_cat_col.sum() > 0
        assert tree_df.loc[node_uses_cat_col, "decision_type"].unique()[0] == '=='
    client.close(timeout=CLIENT_CLOSE_TIMEOUT)
@pytest.mark.parametrize('output', data_output)
def test_regressor_pred_contrib(output, client, listen_port):
    """predict(pred_contrib=True) on the Dask regressor should produce output
    of the same shape as local training: one column per feature plus one
    trailing base-value column.
    """
    X, y, w, dX, dy, dw = _create_data(
        objective='regression',
        output=output
    )
    params = {
        "n_estimators": 10,
        "num_leaves": 10
    }
    # explicitly mark the generated 'cat_col*' columns as categorical features
    if output == 'dataframe-with-categorical':
        params["categorical_feature"] = [
            i for i, col in enumerate(dX.columns) if col.startswith('cat_')
        ]
    dask_regressor = lgb.DaskLGBMRegressor(
        client=client,
        time_out=5,
        local_listen_port=listen_port,
        tree_learner='data',
        **params
    )
    dask_regressor = dask_regressor.fit(dX, dy, sample_weight=dw)
    preds_with_contrib = dask_regressor.predict(dX, pred_contrib=True).compute()
    local_regressor = lgb.LGBMRegressor(**params)
    local_regressor.fit(X, y, sample_weight=w)
    local_preds_with_contrib = local_regressor.predict(X, pred_contrib=True)
    # sparse predictions come back as a sparse matrix; densify for comparison
    if output == "scipy_csr_matrix":
        preds_with_contrib = np.array(preds_with_contrib.todense())
    # contrib outputs for distributed training are different than from local training, so we can just test
    # that the output has the right shape and base values are in the right position
    num_features = dX.shape[1]
    assert preds_with_contrib.shape[1] == num_features + 1
    assert preds_with_contrib.shape == local_preds_with_contrib.shape
    # be sure LightGBM actually used at least one categorical column,
    # and that it was correctly treated as a categorical feature
    if output == 'dataframe-with-categorical':
        cat_cols = [
            col for col in dX.columns
            if dX.dtypes[col].name == 'category'
        ]
        tree_df = dask_regressor.booster_.trees_to_dataframe()
        node_uses_cat_col = tree_df['split_feature'].isin(cat_cols)
        assert node_uses_cat_col.sum() > 0
        assert tree_df.loc[node_uses_cat_col, "decision_type"].unique()[0] == '=='
    client.close(timeout=CLIENT_CLOSE_TIMEOUT)
@pytest.mark.parametrize('output', data_output)
@pytest.mark.parametrize('alpha', [.1, .5, .9])
def test_regressor_quantile(output, client, listen_port, alpha):
    """Quantile-objective regressor should produce predictions such that
    roughly a fraction ``alpha`` of true values fall below them, for both
    distributed and local training.
    """
    X, y, w, dX, dy, dw = _create_data(
        objective='regression',
        output=output
    )
    params = {
        "objective": "quantile",
        "alpha": alpha,
        "random_state": 42,
        "n_estimators": 10,
        "num_leaves": 10
    }
    # explicitly mark the generated 'cat_col*' columns as categorical features
    if output == 'dataframe-with-categorical':
        params["categorical_feature"] = [
            i for i, col in enumerate(dX.columns) if col.startswith('cat_')
        ]
    dask_regressor = lgb.DaskLGBMRegressor(
        client=client,
        local_listen_port=listen_port,
        tree_learner_type='data_parallel',
        **params
    )
    dask_regressor = dask_regressor.fit(dX, dy, sample_weight=dw)
    p1 = dask_regressor.predict(dX).compute()
    # observed fraction of true values below the predicted quantile
    q1 = np.count_nonzero(y < p1) / y.shape[0]
    local_regressor = lgb.LGBMRegressor(**params)
    local_regressor.fit(X, y, sample_weight=w)
    p2 = local_regressor.predict(X)
    q2 = np.count_nonzero(y < p2) / y.shape[0]
    # Quantiles should be right
    np.testing.assert_allclose(q1, alpha, atol=0.2)
    np.testing.assert_allclose(q2, alpha, atol=0.2)
    # be sure LightGBM actually used at least one categorical column,
    # and that it was correctly treated as a categorical feature
    if output == 'dataframe-with-categorical':
        cat_cols = [
            col for col in dX.columns
            if dX.dtypes[col].name == 'category'
        ]
        tree_df = dask_regressor.booster_.trees_to_dataframe()
        node_uses_cat_col = tree_df['split_feature'].isin(cat_cols)
        assert node_uses_cat_col.sum() > 0
        assert tree_df.loc[node_uses_cat_col, "decision_type"].unique()[0] == '=='
    client.close(timeout=CLIENT_CLOSE_TIMEOUT)
@pytest.mark.parametrize('output', ['array', 'dataframe', 'dataframe-with-categorical'])
@pytest.mark.parametrize('group', [None, group_sizes])
def test_ranker(output, client, listen_port, group):
    """Distributed ranker's scores should correlate strongly with both the
    true relevance labels and a locally-trained LGBMRanker's scores.
    """
    if output == 'dataframe-with-categorical':
        X, y, w, g, dX, dy, dw, dg = _create_ranking_data(
            output=output,
            group=group,
            n_features=1,
            n_informative=1
        )
    else:
        X, y, w, g, dX, dy, dw, dg = _create_ranking_data(
            output=output,
            group=group,
        )
    # rebalance small dask.array dataset for better performance.
    if output == 'array':
        dX = dX.persist()
        dy = dy.persist()
        dw = dw.persist()
        dg = dg.persist()
        _ = wait([dX, dy, dw, dg])
        client.rebalance()
    # use many trees + leaves to overfit, help ensure that dask data-parallel strategy matches that of
    # serial learner. See https://github.com/microsoft/LightGBM/issues/3292#issuecomment-671288210.
    params = {
        "random_state": 42,
        "n_estimators": 50,
        "num_leaves": 20,
        "min_child_samples": 1
    }
    # explicitly mark the generated 'cat_col*' columns as categorical features
    if output == 'dataframe-with-categorical':
        params["categorical_feature"] = [
            i for i, col in enumerate(dX.columns) if col.startswith('cat_')
        ]
    dask_ranker = lgb.DaskLGBMRanker(
        client=client,
        time_out=5,
        local_listen_port=listen_port,
        tree_learner_type='data_parallel',
        **params
    )
    dask_ranker = dask_ranker.fit(dX, dy, sample_weight=dw, group=dg)
    rnkvec_dask = dask_ranker.predict(dX)
    rnkvec_dask = rnkvec_dask.compute()
    p1_pred_leaf = dask_ranker.predict(dX, pred_leaf=True)
    rnkvec_dask_local = dask_ranker.to_local().predict(X)
    local_ranker = lgb.LGBMRanker(**params)
    local_ranker.fit(X, y, sample_weight=w, group=g)
    rnkvec_local = local_ranker.predict(X)
    # distributed ranker should be able to rank decently well and should
    # have high rank correlation with scores from serial ranker.
    dcor = spearmanr(rnkvec_dask, y).correlation
    assert dcor > 0.6
    assert spearmanr(rnkvec_dask, rnkvec_local).correlation > 0.8
    assert_eq(rnkvec_dask, rnkvec_dask_local)
    # pref_leaf values should have the right shape
    # and values that look like valid tree nodes
    pred_leaf_vals = p1_pred_leaf.compute()
    assert pred_leaf_vals.shape == (
        X.shape[0],
        dask_ranker.booster_.num_trees()
    )
    assert np.max(pred_leaf_vals) <= params['num_leaves']
    assert np.min(pred_leaf_vals) >= 0
    assert len(np.unique(pred_leaf_vals)) <= params['num_leaves']
    # be sure LightGBM actually used at least one categorical column,
    # and that it was correctly treated as a categorical feature
    if output == 'dataframe-with-categorical':
        cat_cols = [
            col for col in dX.columns
            if dX.dtypes[col].name == 'category'
        ]
        tree_df = dask_ranker.booster_.trees_to_dataframe()
        node_uses_cat_col = tree_df['split_feature'].isin(cat_cols)
        assert node_uses_cat_col.sum() > 0
        assert tree_df.loc[node_uses_cat_col, "decision_type"].unique()[0] == '=='
    client.close(timeout=CLIENT_CLOSE_TIMEOUT)
@pytest.mark.parametrize('task', tasks)
def test_training_works_if_client_not_provided_or_set_after_construction(task, listen_port, client):
    """Dask estimators should work without an explicit client (falling back
    to the default one) and when a client is provided via set_params().

    Also checks that ``client`` / ``client_`` accessors behave correctly
    before and after fitting, and that to_local() strips client state.
    """
    if task == 'ranking':
        _, _, _, _, dX, dy, _, dg = _create_ranking_data(
            output='array',
            group=None
        )
        model_factory = lgb.DaskLGBMRanker
    else:
        _, _, _, dX, dy, _ = _create_data(
            objective=task,
            output='array',
        )
        dg = None
        if task == 'classification':
            model_factory = lgb.DaskLGBMClassifier
        elif task == 'regression':
            model_factory = lgb.DaskLGBMRegressor
    params = {
        "time_out": 5,
        "local_listen_port": listen_port,
        "n_estimators": 1,
        "num_leaves": 2
    }
    # should be able to use the class without specifying a client
    dask_model = model_factory(**params)
    assert dask_model.client is None
    with pytest.raises(lgb.compat.LGBMNotFittedError, match='Cannot access property client_ before calling fit'):
        dask_model.client_
    dask_model.fit(dX, dy, group=dg)
    assert dask_model.fitted_
    assert dask_model.client is None
    # client_ resolves to the active (fixture-provided) client after fit
    assert dask_model.client_ == client
    preds = dask_model.predict(dX)
    assert isinstance(preds, da.Array)
    assert dask_model.fitted_
    assert dask_model.client is None
    assert dask_model.client_ == client
    # the local (sklearn) model must not carry any client attributes
    local_model = dask_model.to_local()
    with pytest.raises(AttributeError):
        local_model.client
        local_model.client_
    # should be able to set client after construction
    dask_model = model_factory(**params)
    dask_model.set_params(client=client)
    assert dask_model.client == client
    with pytest.raises(lgb.compat.LGBMNotFittedError, match='Cannot access property client_ before calling fit'):
        dask_model.client_
    dask_model.fit(dX, dy, group=dg)
    assert dask_model.fitted_
    assert dask_model.client == client
    assert dask_model.client_ == client
    preds = dask_model.predict(dX)
    assert isinstance(preds, da.Array)
    assert dask_model.fitted_
    assert dask_model.client == client
    assert dask_model.client_ == client
    local_model = dask_model.to_local()
    with pytest.raises(AttributeError):
        local_model.client
        local_model.client_
    client.close(timeout=CLIENT_CLOSE_TIMEOUT)
@pytest.mark.parametrize('serializer', ['pickle', 'joblib', 'cloudpickle'])
@pytest.mark.parametrize('task', tasks)
@pytest.mark.parametrize('set_client', [True, False])
def test_model_and_local_version_are_picklable_whether_or_not_client_set_explicitly(serializer, task, set_client, listen_port, tmp_path):
    """Fitted and unfitted Dask models (and their to_local() versions) should
    survive a serialization round trip with several serializers, with or
    without an explicitly-set client.

    Two clusters are created so the test can distinguish the default client
    (client2, the most recently created) from an explicitly-chosen one
    (client1). Pickling must not mutate the original model, and the unpickled
    model's ``client`` attribute is always ``None``.
    """
    with LocalCluster(n_workers=2, threads_per_worker=1) as cluster1:
        with Client(cluster1) as client1:
            # data on cluster1
            if task == 'ranking':
                X_1, _, _, _, dX_1, dy_1, _, dg_1 = _create_ranking_data(
                    output='array',
                    group=None
                )
            else:
                X_1, _, _, dX_1, dy_1, _ = _create_data(
                    objective=task,
                    output='array',
                )
                dg_1 = None
            with LocalCluster(n_workers=2, threads_per_worker=1) as cluster2:
                with Client(cluster2) as client2:
                    # create identical data on cluster2
                    if task == 'ranking':
                        X_2, _, _, _, dX_2, dy_2, _, dg_2 = _create_ranking_data(
                            output='array',
                            group=None
                        )
                    else:
                        X_2, _, _, dX_2, dy_2, _ = _create_data(
                            objective=task,
                            output='array',
                        )
                        dg_2 = None
                    if task == 'ranking':
                        model_factory = lgb.DaskLGBMRanker
                    elif task == 'classification':
                        model_factory = lgb.DaskLGBMClassifier
                    elif task == 'regression':
                        model_factory = lgb.DaskLGBMRegressor
                    params = {
                        "time_out": 5,
                        "local_listen_port": listen_port,
                        "n_estimators": 1,
                        "num_leaves": 2
                    }
                    # at this point, the result of default_client() is client2 since it was the most recently
                    # created. So setting client to client1 here to test that you can select a non-default client
                    assert default_client() == client2
                    if set_client:
                        params.update({"client": client1})
                    # unfitted model should survive pickling round trip, and pickling
                    # shouldn't have side effects on the model object
                    dask_model = model_factory(**params)
                    local_model = dask_model.to_local()
                    if set_client:
                        assert dask_model.client == client1
                    else:
                        assert dask_model.client is None
                    with pytest.raises(lgb.compat.LGBMNotFittedError, match='Cannot access property client_ before calling fit'):
                        dask_model.client_
                    assert "client" not in local_model.get_params()
                    assert getattr(local_model, "client", None) is None
                    tmp_file = str(tmp_path / "model-1.pkl")
                    _pickle(
                        obj=dask_model,
                        filepath=tmp_file,
                        serializer=serializer
                    )
                    model_from_disk = _unpickle(
                        filepath=tmp_file,
                        serializer=serializer
                    )
                    local_tmp_file = str(tmp_path / "local-model-1.pkl")
                    _pickle(
                        obj=local_model,
                        filepath=local_tmp_file,
                        serializer=serializer
                    )
                    local_model_from_disk = _unpickle(
                        filepath=local_tmp_file,
                        serializer=serializer
                    )
                    assert model_from_disk.client is None
                    # pickling must not have changed the original model's client
                    if set_client:
                        assert dask_model.client == client1
                    else:
                        assert dask_model.client is None
                    with pytest.raises(lgb.compat.LGBMNotFittedError, match='Cannot access property client_ before calling fit'):
                        dask_model.client_
                    # client will always be None after unpickling
                    if set_client:
                        from_disk_params = model_from_disk.get_params()
                        from_disk_params.pop("client", None)
                        dask_params = dask_model.get_params()
                        dask_params.pop("client", None)
                        assert from_disk_params == dask_params
                    else:
                        assert model_from_disk.get_params() == dask_model.get_params()
                    assert local_model_from_disk.get_params() == local_model.get_params()
                    # fitted model should survive pickling round trip, and pickling
                    # shouldn't have side effects on the model object
                    if set_client:
                        dask_model.fit(dX_1, dy_1, group=dg_1)
                    else:
                        dask_model.fit(dX_2, dy_2, group=dg_2)
                    local_model = dask_model.to_local()
                    assert "client" not in local_model.get_params()
                    with pytest.raises(AttributeError):
                        local_model.client
                        local_model.client_
                    tmp_file2 = str(tmp_path / "model-2.pkl")
                    _pickle(
                        obj=dask_model,
                        filepath=tmp_file2,
                        serializer=serializer
                    )
                    fitted_model_from_disk = _unpickle(
                        filepath=tmp_file2,
                        serializer=serializer
                    )
                    local_tmp_file2 = str(tmp_path / "local-model-2.pkl")
                    _pickle(
                        obj=local_model,
                        filepath=local_tmp_file2,
                        serializer=serializer
                    )
                    local_fitted_model_from_disk = _unpickle(
                        filepath=local_tmp_file2,
                        serializer=serializer
                    )
                    if set_client:
                        assert dask_model.client == client1
                        assert dask_model.client_ == client1
                    else:
                        assert dask_model.client is None
                        assert dask_model.client_ == default_client()
                        assert dask_model.client_ == client2
                    assert isinstance(fitted_model_from_disk, model_factory)
                    assert fitted_model_from_disk.client is None
                    assert fitted_model_from_disk.client_ == default_client()
                    assert fitted_model_from_disk.client_ == client2
                    # client will always be None after unpickling
                    if set_client:
                        from_disk_params = fitted_model_from_disk.get_params()
                        from_disk_params.pop("client", None)
                        dask_params = dask_model.get_params()
                        dask_params.pop("client", None)
                        assert from_disk_params == dask_params
                    else:
                        assert fitted_model_from_disk.get_params() == dask_model.get_params()
                    assert local_fitted_model_from_disk.get_params() == local_model.get_params()
                    # original and round-tripped models must predict identically
                    if set_client:
                        preds_orig = dask_model.predict(dX_1).compute()
                        preds_loaded_model = fitted_model_from_disk.predict(dX_1).compute()
                        preds_orig_local = local_model.predict(X_1)
                        preds_loaded_model_local = local_fitted_model_from_disk.predict(X_1)
                    else:
                        preds_orig = dask_model.predict(dX_2).compute()
                        preds_loaded_model = fitted_model_from_disk.predict(dX_2).compute()
                        preds_orig_local = local_model.predict(X_2)
                        preds_loaded_model_local = local_fitted_model_from_disk.predict(X_2)
                    assert_eq(preds_orig, preds_loaded_model)
                    assert_eq(preds_orig_local, preds_loaded_model_local)
def test_find_open_port_works():
    """_find_open_port() should step past ports that are already bound."""
    ip = '127.0.0.1'
    # one port occupied -> the next one should be returned
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as blocker:
        blocker.bind((ip, 12400))
        found_port = lgb.dask._find_open_port(
            worker_ip=ip,
            local_listen_port=12400,
            ports_to_skip=set()
        )
        assert found_port == 12401
    # two consecutive ports occupied -> the one after both should be returned
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as first_blocker:
        first_blocker.bind((ip, 12400))
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as second_blocker:
            second_blocker.bind((ip, 12401))
            found_port = lgb.dask._find_open_port(
                worker_ip=ip,
                local_listen_port=12400,
                ports_to_skip=set()
            )
            assert found_port == 12402
def test_warns_and_continues_on_unrecognized_tree_learner(client):
    """An unrecognized tree_learner value should emit a UserWarning but still
    allow training to complete.
    """
    X = da.random.random((1e3, 10))
    y = da.random.random((1e3, 1))
    dask_regressor = lgb.DaskLGBMRegressor(
        client=client,
        time_out=5,
        local_listen_port=1234,
        tree_learner='some-nonsense-value',
        n_estimators=1,
        num_leaves=2
    )
    with pytest.warns(UserWarning, match='Parameter tree_learner set to some-nonsense-value'):
        dask_regressor = dask_regressor.fit(X, y)
    assert dask_regressor.fitted_
    client.close(timeout=CLIENT_CLOSE_TIMEOUT)
def test_warns_but_makes_no_changes_for_feature_or_voting_tree_learner(client):
    """feature_parallel and voting tree learners should warn but be left
    unchanged on the fitted model.
    """
    X = da.random.random((1e3, 10))
    y = da.random.random((1e3, 1))
    for tree_learner in ['feature_parallel', 'voting']:
        dask_regressor = lgb.DaskLGBMRegressor(
            client=client,
            time_out=5,
            local_listen_port=1234,
            tree_learner=tree_learner,
            n_estimators=1,
            num_leaves=2
        )
        with pytest.warns(UserWarning, match='Support for tree_learner %s in lightgbm' % tree_learner):
            dask_regressor = dask_regressor.fit(X, y)
        assert dask_regressor.fitted_
        # the user's chosen tree_learner must not be silently overridden
        assert dask_regressor.get_params()['tree_learner'] == tree_learner
    client.close(timeout=CLIENT_CLOSE_TIMEOUT)
@gen_cluster(client=True, timeout=None)
def test_errors(c, s, a, b):
    """Exceptions raised inside worker tasks should propagate out of _train()."""
    def f(part):
        # every partition fails, so training cannot succeed
        raise Exception('foo')
    df = dd.demo.make_timeseries()
    df = df.map_partitions(f, meta=df._meta)
    with pytest.raises(Exception) as info:
        # gen_cluster-style coroutine test: 'yield' awaits the distributed call
        yield lgb.dask._train(
            client=c,
            data=df,
            label=df.x,
            params={},
            model_factory=lgb.LGBMClassifier
        )
        # NOTE(review): this assert sits inside the pytest.raises block, after
        # the statement expected to raise -- it may never execute; confirm and
        # consider dedenting it out of the 'with' block.
        assert 'foo' in str(info.value)
@pytest.mark.parametrize('task', tasks)
@pytest.mark.parametrize('output', data_output)
def test_training_succeeds_even_if_some_workers_do_not_have_any_data(client, task, output):
    """Training should succeed when all data fits in a single partition.

    Every Dask collection is collapsed down to one partition, so on a
    multi-worker cluster at least one worker receives no data; distributed
    training must still complete and roughly agree with local training.
    """
    if task == 'ranking' and output == 'scipy_csr_matrix':
        pytest.skip('LGBMRanker is not currently tested on sparse matrices')

    # leading underscore for consistency with the module's other helpers
    def _collection_to_single_partition(collection):
        """Merge the parts of a Dask collection into a single partition."""
        if collection is None:
            return
        if isinstance(collection, da.Array):
            return collection.rechunk(*collection.shape)
        return collection.repartition(npartitions=1)

    if task == 'ranking':
        X, y, w, g, dX, dy, dw, dg = _create_ranking_data(
            output=output,
            group=None
        )
        dask_model_factory = lgb.DaskLGBMRanker
        local_model_factory = lgb.LGBMRanker
    else:
        X, y, w, dX, dy, dw = _create_data(
            objective=task,
            output=output
        )
        g = None
        dg = None
        if task == 'classification':
            dask_model_factory = lgb.DaskLGBMClassifier
            local_model_factory = lgb.LGBMClassifier
        elif task == 'regression':
            dask_model_factory = lgb.DaskLGBMRegressor
            local_model_factory = lgb.LGBMRegressor

    dX = _collection_to_single_partition(dX)
    dy = _collection_to_single_partition(dy)
    dw = _collection_to_single_partition(dw)
    dg = _collection_to_single_partition(dg)

    n_workers = len(client.scheduler_info()['workers'])
    assert n_workers > 1
    assert dX.npartitions == 1

    params = {
        'time_out': 5,
        'random_state': 42,
        'num_leaves': 10
    }
    dask_model = dask_model_factory(tree='data', client=client, **params)
    dask_model.fit(dX, dy, group=dg, sample_weight=dw)
    dask_preds = dask_model.predict(dX).compute()

    local_model = local_model_factory(**params)
    if task == 'ranking':
        local_model.fit(X, y, group=g, sample_weight=w)
    else:
        local_model.fit(X, y, sample_weight=w)
    local_preds = local_model.predict(X)

    # BUGFIX: assert_eq() raises on mismatch; the previous
    # 'assert assert_eq(...)' additionally asserted on its return value,
    # which is not guaranteed to be truthy. Call it directly instead.
    assert_eq(dask_preds, local_preds)

    client.close(timeout=CLIENT_CLOSE_TIMEOUT)
@pytest.mark.parametrize(
    "classes",
    [
        (lgb.DaskLGBMClassifier, lgb.LGBMClassifier),
        (lgb.DaskLGBMRegressor, lgb.LGBMRegressor),
        (lgb.DaskLGBMRanker, lgb.LGBMRanker)
    ]
)
def test_dask_classes_and_sklearn_equivalents_have_identical_constructors_except_client_arg(classes):
    """Each Dask estimator's constructor must mirror its sklearn equivalent,
    differing only by a trailing ``client=None`` keyword argument.
    """
    dask_spec = inspect.getfullargspec(classes[0])
    sklearn_spec = inspect.getfullargspec(classes[1])
    assert dask_spec.varargs == sklearn_spec.varargs
    assert dask_spec.varkw == sklearn_spec.varkw
    assert dask_spec.kwonlyargs == sklearn_spec.kwonlyargs
    assert dask_spec.kwonlydefaults == sklearn_spec.kwonlydefaults
    # "client" should be the only different, and the final argument
    assert dask_spec.args[:-1] == sklearn_spec.args
    assert dask_spec.defaults[:-1] == sklearn_spec.defaults
    assert dask_spec.args[-1] == 'client'
    assert dask_spec.defaults[-1] is None
@pytest.mark.parametrize(
    "methods",
    [
        (lgb.DaskLGBMClassifier.fit, lgb.LGBMClassifier.fit),
        (lgb.DaskLGBMClassifier.predict, lgb.LGBMClassifier.predict),
        (lgb.DaskLGBMClassifier.predict_proba, lgb.LGBMClassifier.predict_proba),
        (lgb.DaskLGBMRegressor.fit, lgb.LGBMRegressor.fit),
        (lgb.DaskLGBMRegressor.predict, lgb.LGBMRegressor.predict),
        (lgb.DaskLGBMRanker.fit, lgb.LGBMRanker.fit),
        (lgb.DaskLGBMRanker.predict, lgb.LGBMRanker.predict)
    ]
)
def test_dask_methods_and_sklearn_equivalents_have_similar_signatures(methods):
    """Dask fit/predict methods should be prefixes of their sklearn
    counterparts' signatures, with identical defaults for shared parameters.
    """
    dask_spec = inspect.getfullargspec(methods[0])
    sklearn_spec = inspect.getfullargspec(methods[1])
    dask_params = inspect.signature(methods[0]).parameters
    sklearn_params = inspect.signature(methods[1]).parameters
    # Dask methods may accept fewer args, but those they share must line up
    assert dask_spec.args == sklearn_spec.args[:len(dask_spec.args)]
    assert dask_spec.varargs == sklearn_spec.varargs
    if sklearn_spec.varkw:
        assert dask_spec.varkw == sklearn_spec.varkw[:len(dask_spec.varkw)]
    assert dask_spec.kwonlyargs == sklearn_spec.kwonlyargs
    assert dask_spec.kwonlydefaults == sklearn_spec.kwonlydefaults
    for param in dask_spec.args:
        error_msg = f"param '{param}' has different default values in the methods"
        assert dask_params[param].default == sklearn_params[param].default, error_msg
| 1 | 28,572 | can you please change this to `_collection_to_single_partition()`, like we've done for all other helper methods defined in this module? | microsoft-LightGBM | cpp |
@@ -24,6 +24,7 @@ namespace Thelia\Core;
use Propel\Runtime\Connection\ConnectionManagerSingle;
use Propel\Runtime\Connection\ConnectionWrapper;
use Propel\Runtime\Propel;
+use Propel\Runtime\ServiceContainer\StandardServiceContainer;
use Symfony\Component\Config\FileLocator;
use Symfony\Component\Config\Loader\LoaderInterface;
use Symfony\Component\Debug\Debug; | 1 | <?php
/*************************************************************************************/
/* This file is part of the Thelia package. */
/* */
/* Copyright (c) OpenStudio */
/* email : [email protected] */
/* web : http://www.thelia.net */
/* */
/* For the full copyright and license information, please view the LICENSE.txt */
/* file that was distributed with this source code. */
/*************************************************************************************/
namespace Thelia\Core;
/**
* Root class of Thelia
*
* It extends Symfony\Component\HttpKernel\Kernel for changing some features
*
*
* @author Manuel Raynaud <[email protected]>
*/
use Propel\Runtime\Connection\ConnectionManagerSingle;
use Propel\Runtime\Connection\ConnectionWrapper;
use Propel\Runtime\Propel;
use Symfony\Component\Config\FileLocator;
use Symfony\Component\Config\Loader\LoaderInterface;
use Symfony\Component\Debug\Debug;
use Symfony\Component\DependencyInjection\ContainerBuilder;
use Symfony\Component\DependencyInjection\Definition;
use Symfony\Component\DependencyInjection\ParameterBag\ParameterBag;
use Symfony\Component\DependencyInjection\Reference;
use Symfony\Component\Finder\Finder;
use Symfony\Component\HttpKernel\Kernel;
use Symfony\Component\Yaml\Yaml;
use Thelia\Config\DatabaseConfiguration;
use Thelia\Config\DefinePropel;
use Thelia\Core\DependencyInjection\Loader\XmlFileLoader;
use Thelia\Core\Event\TheliaEvents;
use Thelia\Core\Template\ParserInterface;
use Thelia\Core\Template\TemplateDefinition;
use Thelia\Core\Template\TemplateHelper;
use Thelia\Log\Tlog;
use Thelia\Model\Module;
use Thelia\Model\ModuleQuery;
class Thelia extends Kernel
{
const THELIA_VERSION = '2.1.0-alpha2';
    /**
     * @param string $environment the Symfony kernel environment name
     * @param bool   $debug       true to enable Symfony's debug tooling
     */
    public function __construct($environment, $debug)
    {
        parent::__construct($environment, $debug);
        if ($debug) {
            Debug::enable();
        }
        // wire up Propel as soon as the kernel exists (no-op if not installed)
        $this->initPropel();
    }
public static function isInstalled()
{
return file_exists(THELIA_CONF_DIR . 'database.yml');
}
    /**
     * Configure Propel's runtime from conf/database.yml.
     *
     * Does nothing when Thelia is not installed yet. Otherwise it registers
     * the 'thelia' datasource (MySQL adapter, single connection manager),
     * enables prepared-statement caching on the connection and, in debug
     * mode, plugs Tlog in as Propel's logger and turns on query debugging.
     */
    protected function initPropel()
    {
        if (self::isInstalled() === false) {
            return ;
        }
        $definePropel = new DefinePropel(
            new DatabaseConfiguration(),
            Yaml::parse(THELIA_CONF_DIR . 'database.yml')
        );
        $serviceContainer = Propel::getServiceContainer();
        $serviceContainer->setAdapterClass('thelia', 'mysql');
        $serviceContainer->setDefaultDatasource('thelia');
        $manager = new ConnectionManagerSingle();
        $manager->setConfiguration($definePropel->getConfig());
        $serviceContainer->setConnectionManager('thelia', $manager);
        // reuse prepared statements across queries on the wrapped connection
        $con = Propel::getConnection(\Thelia\Model\Map\ProductTableMap::DATABASE_NAME);
        $con->setAttribute(ConnectionWrapper::PROPEL_ATTR_CACHE_PREPARES, true);
        if ($this->isDebug()) {
            $serviceContainer->setLogger('defaultLogger', Tlog::getInstance());
            $con->useDebug(true);
        }
    }
    /**
     * Boot the Symfony kernel, then dispatch the TheliaEvents::BOOT event.
     *
     * The event is only dispatched when Thelia is installed, so boots that
     * happen during installation stay silent.
     */
    public function boot()
    {
        parent::boot();
        if (self::isInstalled()) {
            $this->getContainer()->get("event_dispatcher")->dispatch(TheliaEvents::BOOT);
        }
    }
/**
* Add all module's standard templates to the parser environment
*
* @param ParserInterface $parser the parser
* @param Module $module the Module.
*/
protected function addStandardModuleTemplatesToParserEnvironment($parser, $module)
{
$stdTpls = TemplateDefinition::getStandardTemplatesSubdirsIterator();
foreach ($stdTpls as $templateType => $templateSubdirName) {
$this->addModuleTemplateToParserEnvironment($parser, $module, $templateType, $templateSubdirName);
}
}
    /**
     * Add a module template directory to the parser environment.
     *
     * Each subdirectory of the module's template directory (skipping '.' and
     * '..') is registered with the parser via a deferred 'addTemplateDirectory'
     * method call. A missing template directory is silently ignored.
     *
     * @param ParserInterface $parser             the parser
     * @param Module          $module             the Module.
     * @param string          $templateType       the template type (one of the TemplateDefinition type constants)
     * @param string          $templateSubdirName the template subdirectory name (one of the TemplateDefinition::XXX_SUBDIR constants)
     */
    protected function addModuleTemplateToParserEnvironment($parser, $module, $templateType, $templateSubdirName)
    {
        // Get template path
        $templateDirectory = $module->getAbsoluteTemplateDirectoryPath($templateSubdirName);
        try {
            $templateDirBrowser = new \DirectoryIterator($templateDirectory);
            $code = ucfirst($module->getCode());
            /* browse the directory */
            foreach ($templateDirBrowser as $templateDirContent) {
                /* is it a directory which is not . or .. ? */
                if ($templateDirContent->isDir() && ! $templateDirContent->isDot()) {
                    $parser->addMethodCall(
                        'addTemplateDirectory',
                        array(
                            $templateType,
                            $templateDirContent->getFilename(),
                            $templateDirContent->getPathName(),
                            $code
                        )
                    );
                }
            }
        } catch (\UnexpectedValueException $ex) {
            // The directory does not exists, ignore it.
        }
    }
/**
*
* Load some configuration
* Initialize all plugins
*
*/
protected function loadConfiguration(ContainerBuilder $container)
{
$loader = new XmlFileLoader($container, new FileLocator(THELIA_ROOT . "/core/lib/Thelia/Config/Resources"));
$finder = Finder::create()
->name('*.xml')
->depth(0)
->in(THELIA_ROOT . "/core/lib/Thelia/Config/Resources");
/** @var \SplFileInfo $file */
foreach ($finder as $file) {
$loader->load($file->getBaseName());
}
if (defined("THELIA_INSTALL_MODE") === false) {
$modules = ModuleQuery::getActivated();
$translationDirs = array();
/** @var Module $module */
foreach ($modules as $module) {
try {
$definition = new Definition();
$definition->setClass($module->getFullNamespace());
$definition->addMethodCall("setContainer", array(new Reference('service_container')));
$container->setDefinition(
"module." . $module->getCode(),
$definition
);
$compilers = call_user_func(array($module->getFullNamespace(), 'getCompilers'));
foreach ($compilers as $compiler) {
if (is_array($compiler)) {
$container->addCompilerPass($compiler[0], $compiler[1]);
} else {
$container->addCompilerPass($compiler);
}
}
$loader = new XmlFileLoader($container, new FileLocator($module->getAbsoluteConfigPath()));
$loader->load("config.xml", "module." . $module->getCode());
} catch (\Exception $e) {
Tlog::getInstance()->addError(
sprintf("Failed to load module %s: %s", $module->getCode(), $e->getMessage()),
$e
);
}
}
/** @var ParserInterface $parser */
$parser = $container->getDefinition('thelia.parser');
/** @var Module $module */
foreach ($modules as $module) {
try {
// Core module translation
if (is_dir($dir = $module->getAbsoluteI18nPath())) {
$translationDirs[$module->getTranslationDomain()] = $dir;
}
// Admin includes translation
if (is_dir($dir = $module->getAbsoluteAdminIncludesI18nPath())) {
$translationDirs[$module->getAdminIncludesTranslationDomain()] = $dir;
}
// Module back-office template, if any
$templates =
TemplateHelper::getInstance()->getList(
TemplateDefinition::BACK_OFFICE,
$module->getAbsoluteTemplateBasePath()
);
foreach ($templates as $template) {
$translationDirs[$module->getBackOfficeTemplateTranslationDomain($template->getName())] =
$module->getAbsoluteBackOfficeI18nTemplatePath($template->getName());
}
// Module front-office template, if any
$templates =
TemplateHelper::getInstance()->getList(
TemplateDefinition::FRONT_OFFICE,
$module->getAbsoluteTemplateBasePath()
);
foreach ($templates as $template) {
$translationDirs[$module->getFrontOfficeTemplateTranslationDomain($template->getName())] =
$module->getAbsoluteFrontOfficeI18nTemplatePath($template->getName());
}
$this->addStandardModuleTemplatesToParserEnvironment($parser, $module);
} catch (\Exception $e) {
Tlog::getInstance()->addError(
sprintf("Failed to load module %s: %s", $module->getCode(), $e->getMessage()),
$e
);
}
}
// Load core translation
$translationDirs['core'] = THELIA_ROOT . 'core'.DS.'lib'.DS.'Thelia'.DS.'Config'.DS.'I18n';
// Standard templates (front, back, pdf, mail)
$th = TemplateHelper::getInstance();
/** @var TemplateDefinition $templateDefinition */
foreach ($th->getStandardTemplateDefinitions() as $templateDefinition) {
if (is_dir($dir = $templateDefinition->getAbsoluteI18nPath())) {
$translationDirs[$templateDefinition->getTranslationDomain()] = $dir;
}
}
if ($translationDirs) {
$this->loadTranslation($container, $translationDirs);
}
}
}
private function loadTranslation(ContainerBuilder $container, array $dirs)
{
$translator = $container->getDefinition('thelia.translator');
foreach ($dirs as $domain => $dir) {
try {
$finder = Finder::create()
->files()
->depth(0)
->in($dir);
/** @var \DirectoryIterator $file */
foreach ($finder as $file) {
list($locale, $format) = explode('.', $file->getBaseName(), 2);
$translator->addMethodCall('addResource', array($format, (string) $file, $locale, $domain));
}
} catch (\InvalidArgumentException $ex) {
// Ignore missing I18n directories
Tlog::getInstance()->addWarning("loadTranslation: missing $dir directory");
}
}
}
/**
*
* initialize session in Request object
*
* All param must be change in Config table
*
* @param \Symfony\Component\HttpFoundation\Request $request
*/
/**
* Gets a new ContainerBuilder instance used to build the service container.
*
* @return ContainerBuilder
*/
protected function getContainerBuilder()
{
return new TheliaContainerBuilder(new ParameterBag($this->getKernelParameters()));
}
/**
* Builds the service container.
*
* @return ContainerBuilder The compiled service container
*
* @throws \RuntimeException
*/
protected function buildContainer()
{
$container = parent::buildContainer();
$this->loadConfiguration($container);
$container->customCompile();
return $container;
}
/**
* Gets the cache directory.
*
* @return string The cache directory
*
* @api
*/
public function getCacheDir()
{
if (defined('THELIA_ROOT')) {
return THELIA_CACHE_DIR.DS.$this->environment;
} else {
return parent::getCacheDir();
}
}
/**
* Gets the log directory.
*
* @return string The log directory
*
* @api
*/
public function getLogDir()
{
if (defined('THELIA_ROOT')) {
return THELIA_LOG_DIR;
} else {
return parent::getLogDir();
}
}
/**
* Returns the kernel parameters.
*
* @return array An array of kernel parameters
*/
protected function getKernelParameters()
{
$parameters = parent::getKernelParameters();
$parameters["thelia.root_dir"] = THELIA_ROOT;
$parameters["thelia.core_dir"] = THELIA_ROOT . "core/lib/Thelia";
$parameters["thelia.module_dir"] = THELIA_MODULE_DIR;
return $parameters;
}
/**
* return available bundle
*
* Part of Symfony\Component\HttpKernel\KernelInterface
*
* @return array An array of bundle instances.
*
*/
public function registerBundles()
{
$bundles = array(
/* TheliaBundle contain all the dependency injection description */
new Bundle\TheliaBundle(),
);
/**
* OTHER CORE BUNDLE CAN BE DECLARE HERE AND INITIALIZE WITH SPECIFIC CONFIGURATION
*
* HOW TO DECLARE OTHER BUNDLE ? ETC
*/
return $bundles;
}
/**
* Loads the container configuration
*
* part of Symfony\Component\HttpKernel\KernelInterface
*
* @param LoaderInterface $loader A LoaderInterface instance
*
* @api
*/
public function registerContainerConfiguration(LoaderInterface $loader)
{
//Nothing is load here but it's possible to load container configuration here.
//exemple in sf2 : $loader->load(__DIR__.'/config/config_'.$this->getEnvironment().'.yml');
}
}
| 1 | 10,747 | This class is never used, don't import it. | thelia-thelia | php |
@@ -41,6 +41,10 @@ namespace Datadog.Trace.DiagnosticListeners
void IObserver<KeyValuePair<string, object>>.OnNext(KeyValuePair<string, object> value)
{
+#if DEBUG
+ // In debug mode we allow exceptions to be catch in the test suite
+ OnNext(value.Key, value.Value);
+#else
try
{
OnNext(value.Key, value.Value); | 1 | #if !NETFRAMEWORK
using System;
using System.Collections.Generic;
using System.Diagnostics;
using Datadog.Trace.Logging;
namespace Datadog.Trace.DiagnosticListeners
{
internal abstract class DiagnosticObserver : IObserver<KeyValuePair<string, object>>
{
private static readonly Vendors.Serilog.ILogger Log = DatadogLogging.For<DiagnosticObserver>();
/// <summary>
/// Gets the name of the <see cref="DiagnosticListener"/> that should be instrumented.
/// </summary>
/// <value>The name of the <see cref="DiagnosticListener"/> that should be instrumented.</value>
protected abstract string ListenerName { get; }
public virtual bool IsSubscriberEnabled()
{
return true;
}
public virtual IDisposable SubscribeIfMatch(DiagnosticListener diagnosticListener)
{
if (diagnosticListener.Name == ListenerName)
{
return diagnosticListener.Subscribe(this, IsEventEnabled);
}
return null;
}
void IObserver<KeyValuePair<string, object>>.OnCompleted()
{
}
void IObserver<KeyValuePair<string, object>>.OnError(Exception error)
{
}
void IObserver<KeyValuePair<string, object>>.OnNext(KeyValuePair<string, object> value)
{
try
{
OnNext(value.Key, value.Value);
}
catch (Exception ex)
{
Log.Error(ex, "Event Exception: {0}", value.Key);
}
}
protected virtual bool IsEventEnabled(string eventName)
{
return true;
}
protected abstract void OnNext(string eventName, object arg);
}
}
#endif
| 1 | 18,459 | Missing a `return`. Either way, I think it's better to add a `#if DEBUG throw;` in the catch block instead (so we keep the log in debug mode) | DataDog-dd-trace-dotnet | .cs |
@@ -43,7 +43,12 @@ module Selenium
def text
@bridge.getAlertText
end
+
+ def authenticate(username, password)
+ @bridge.setAuthentication username: username, password: password
+ accept
+ end
end # Alert
end # WebDriver
-end # Selenium
+end # Selenium | 1 | # encoding: utf-8
#
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
module Selenium
module WebDriver
class Alert
def initialize(bridge)
@bridge = bridge
# fail fast if the alert doesn't exist
bridge.getAlertText
end
def accept
@bridge.acceptAlert
end
def dismiss
@bridge.dismissAlert
end
def send_keys(keys)
@bridge.setAlertValue keys
end
def text
@bridge.getAlertText
end
end # Alert
end # WebDriver
end # Selenium | 1 | 13,040 | Files should have an extra line at the end of them. | SeleniumHQ-selenium | rb |
@@ -104,6 +104,9 @@ namespace NLog.Config
/// </summary>
public IList<LoggingRule> LoggingRules { get; private set; }
+ internal List<LoggingRule> GetLoggingRulesThreadSafe() { lock (LoggingRules) return LoggingRules.ToList(); }
+ private void AddLoggingRulesThreadSafe(LoggingRule rule) { lock (LoggingRules) LoggingRules.Add(rule); }
+
/// <summary>
/// Gets or sets the default culture info to use as <see cref="LogEventInfo.FormatProvider"/>.
/// </summary> | 1 | //
// Copyright (c) 2004-2017 Jaroslaw Kowalski <[email protected]>, Kim Christensen, Julian Verdurmen
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * Neither the name of Jaroslaw Kowalski nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
// THE POSSIBILITY OF SUCH DAMAGE.
//
namespace NLog.Config
{
using System;
using System.Collections.Generic;
using System.Collections.ObjectModel;
using System.Globalization;
using System.Linq;
using JetBrains.Annotations;
using NLog.Common;
using NLog.Internal;
using NLog.Layouts;
using NLog.Targets;
/// <summary>
/// Keeps logging configuration and provides simple API
/// to modify it.
/// </summary>
///<remarks>This class is thread-safe.<c>.ToList()</c> is used for that purpose.</remarks>
public class LoggingConfiguration
{
private readonly IDictionary<string, Target> _targets =
new Dictionary<string, Target>(StringComparer.OrdinalIgnoreCase);
private List<object> _configItems = new List<object>();
/// <summary>
/// Variables defined in xml or in API. name is case case insensitive.
/// </summary>
private readonly Dictionary<string, SimpleLayout> _variables = new Dictionary<string, SimpleLayout>(StringComparer.OrdinalIgnoreCase);
/// <summary>
/// Initializes a new instance of the <see cref="LoggingConfiguration" /> class.
/// </summary>
public LoggingConfiguration()
{
LoggingRules = new List<LoggingRule>();
}
/// <summary>
/// Use the old exception log handling of NLog 3.0?
/// </summary>
/// <remarks>This method was marked as obsolete on NLog 4.1 and it may be removed in a future release.</remarks>
[Obsolete("This option will be removed in NLog 5. Marked obsolete on NLog 4.1")]
public bool ExceptionLoggingOldStyle { get; set; }
/// <summary>
/// Gets the variables defined in the configuration.
/// </summary>
public IDictionary<string, SimpleLayout> Variables => _variables;
/// <summary>
/// Gets a collection of named targets specified in the configuration.
/// </summary>
/// <returns>
/// A list of named targets.
/// </returns>
/// <remarks>
/// Unnamed targets (such as those wrapped by other targets) are not returned.
/// </remarks>
public ReadOnlyCollection<Target> ConfiguredNamedTargets => new List<Target>(_targets.Values).AsReadOnly();
/// <summary>
/// Gets the collection of file names which should be watched for changes by NLog.
/// </summary>
public virtual IEnumerable<string> FileNamesToWatch => ArrayHelper.Empty<string>();
/// <summary>
/// Gets the collection of logging rules.
/// </summary>
public IList<LoggingRule> LoggingRules { get; private set; }
/// <summary>
/// Gets or sets the default culture info to use as <see cref="LogEventInfo.FormatProvider"/>.
/// </summary>
/// <value>
/// Specific culture info or null to use <see cref="CultureInfo.CurrentCulture"/>
/// </value>
[CanBeNull]
public CultureInfo DefaultCultureInfo { get; set; }
/// <summary>
/// Gets all targets.
/// </summary>
public ReadOnlyCollection<Target> AllTargets
{
get
{
var configTargets = _configItems.OfType<Target>();
return configTargets.Concat(_targets.Values).Distinct(TargetNameComparer).ToList().AsReadOnly();
}
}
/// <summary>
/// Compare <see cref="Target"/> objects based on their name.
/// </summary>
/// <remarks>This property is use to cache the comparer object.</remarks>
private readonly static IEqualityComparer<Target> TargetNameComparer = new TargetNameEqualityComparer();
/// <summary>
/// Defines methods to support the comparison of <see cref="Target"/> objects for equality based on their name.
/// </summary>
private class TargetNameEqualityComparer : IEqualityComparer<Target>
{
public bool Equals(Target x, Target y)
{
return string.Equals(x.Name, y.Name);
}
public int GetHashCode(Target obj)
{
return (obj.Name != null ? obj.Name.GetHashCode() : 0);
}
}
/// <summary>
/// Registers the specified target object. The name of the target is read from <see cref="Target.Name"/>.
/// </summary>
/// <param name="target">
/// The target object with a non <see langword="null"/> <see cref="Target.Name"/>
/// </param>
/// <exception cref="ArgumentNullException">when <paramref name="target"/> is <see langword="null"/></exception>
public void AddTarget([NotNull] Target target)
{
if (target == null) { throw new ArgumentNullException("target"); }
AddTarget(target.Name, target);
}
/// <summary>
/// Registers the specified target object under a given name.
/// </summary>
/// <param name="name">
/// Name of the target.
/// </param>
/// <param name="target">
/// The target object.
/// </param>
/// <exception cref="ArgumentException">when <paramref name="name"/> is <see langword="null"/></exception>
/// <exception cref="ArgumentNullException">when <paramref name="target"/> is <see langword="null"/></exception>
public void AddTarget(string name, Target target)
{
if (name == null)
{
// TODO: NLog 5 - The ArgumentException should be changed to ArgumentNullException for name parameter.
throw new ArgumentException("Target name cannot be null", "name");
}
if (target == null) { throw new ArgumentNullException("target"); }
InternalLogger.Debug("Registering target {0}: {1}", name, target.GetType().FullName);
_targets[name] = target;
}
/// <summary>
/// Finds the target with the specified name.
/// </summary>
/// <param name="name">
/// The name of the target to be found.
/// </param>
/// <returns>
/// Found target or <see langword="null"/> when the target is not found.
/// </returns>
public Target FindTargetByName(string name)
{
Target value;
if (!_targets.TryGetValue(name, out value))
{
return null;
}
return value;
}
/// <summary>
/// Finds the target with the specified name and specified type.
/// </summary>
/// <param name="name">
/// The name of the target to be found.
/// </param>
/// <typeparam name="TTarget">Type of the target</typeparam>
/// <returns>
/// Found target or <see langword="null"/> when the target is not found of not of type <typeparamref name="TTarget"/>
/// </returns>
public TTarget FindTargetByName<TTarget>(string name)
where TTarget : Target
{
return FindTargetByName(name) as TTarget;
}
/// <summary>
/// Add a rule with min- and maxLevel.
/// </summary>
/// <param name="minLevel">Minimum log level needed to trigger this rule.</param>
/// <param name="maxLevel">Maximum log level needed to trigger this rule.</param>
/// <param name="targetName">Name of the target to be written when the rule matches.</param>
/// <param name="loggerNamePattern">Logger name pattern. It may include the '*' wildcard at the beginning, at the end or at both ends.</param>
public void AddRule(LogLevel minLevel, LogLevel maxLevel, string targetName, string loggerNamePattern = "*")
{
var target = FindTargetByName(targetName);
if (target == null)
{
throw new NLogRuntimeException("Target '{0}' not found", targetName);
}
AddRule(minLevel, maxLevel, target, loggerNamePattern);
}
/// <summary>
/// Add a rule with min- and maxLevel.
/// </summary>
/// <param name="minLevel">Minimum log level needed to trigger this rule.</param>
/// <param name="maxLevel">Maximum log level needed to trigger this rule.</param>
/// <param name="target">Target to be written to when the rule matches.</param>
/// <param name="loggerNamePattern">Logger name pattern. It may include the '*' wildcard at the beginning, at the end or at both ends.</param>
public void AddRule(LogLevel minLevel, LogLevel maxLevel, Target target, string loggerNamePattern = "*")
{
LoggingRules.Add(new LoggingRule(loggerNamePattern, minLevel, maxLevel, target));
}
/// <summary>
/// Add a rule for one loglevel.
/// </summary>
/// <param name="level">log level needed to trigger this rule. </param>
/// <param name="targetName">Name of the target to be written when the rule matches.</param>
/// <param name="loggerNamePattern">Logger name pattern. It may include the '*' wildcard at the beginning, at the end or at both ends.</param>
public void AddRuleForOneLevel(LogLevel level, string targetName, string loggerNamePattern = "*")
{
var target = FindTargetByName(targetName);
if (target == null)
{
throw new NLogConfigurationException("Target '{0}' not found", targetName);
}
AddRuleForOneLevel(level, target, loggerNamePattern);
}
/// <summary>
/// Add a rule for one loglevel.
/// </summary>
/// <param name="level">log level needed to trigger this rule. </param>
/// <param name="target">Target to be written to when the rule matches.</param>
/// <param name="loggerNamePattern">Logger name pattern. It may include the '*' wildcard at the beginning, at the end or at both ends.</param>
public void AddRuleForOneLevel(LogLevel level, Target target, string loggerNamePattern = "*")
{
var loggingRule = new LoggingRule(loggerNamePattern, target);
loggingRule.EnableLoggingForLevel(level);
LoggingRules.Add(loggingRule);
}
/// <summary>
/// Add a rule for alle loglevels.
/// </summary>
/// <param name="targetName">Name of the target to be written when the rule matches.</param>
/// <param name="loggerNamePattern">Logger name pattern. It may include the '*' wildcard at the beginning, at the end or at both ends.</param>
public void AddRuleForAllLevels(string targetName, string loggerNamePattern = "*")
{
var target = FindTargetByName(targetName);
if (target == null)
{
throw new NLogRuntimeException("Target '{0}' not found", targetName);
}
AddRuleForAllLevels(target, loggerNamePattern);
}
/// <summary>
/// Add a rule for alle loglevels.
/// </summary>
/// <param name="target">Target to be written to when the rule matches.</param>
/// <param name="loggerNamePattern">Logger name pattern. It may include the '*' wildcard at the beginning, at the end or at both ends.</param>
public void AddRuleForAllLevels(Target target, string loggerNamePattern = "*")
{
var loggingRule = new LoggingRule(loggerNamePattern, target);
loggingRule.EnableLoggingForLevels(LogLevel.MinLevel, LogLevel.MaxLevel);
LoggingRules.Add(loggingRule);
}
/// <summary>
/// Called by LogManager when one of the log configuration files changes.
/// </summary>
/// <returns>
/// A new instance of <see cref="LoggingConfiguration"/> that represents the updated configuration.
/// </returns>
public virtual LoggingConfiguration Reload()
{
return this;
}
/// <summary>
/// Removes the specified named target.
/// </summary>
/// <param name="name">
/// Name of the target.
/// </param>
public void RemoveTarget(string name)
{
_targets.Remove(name);
}
/// <summary>
/// Installs target-specific objects on current system.
/// </summary>
/// <param name="installationContext">The installation context.</param>
/// <remarks>
/// Installation typically runs with administrative permissions.
/// </remarks>
public void Install(InstallationContext installationContext)
{
if (installationContext == null)
{
throw new ArgumentNullException("installationContext");
}
InitializeAll();
var configItemsList = GetInstallableItems();
foreach (IInstallable installable in configItemsList)
{
installationContext.Info("Installing '{0}'", installable);
try
{
installable.Install(installationContext);
installationContext.Info("Finished installing '{0}'.", installable);
}
catch (Exception exception)
{
InternalLogger.Error(exception, "Install of '{0}' failed.", installable);
if (exception.MustBeRethrownImmediately() || installationContext.ThrowExceptions)
{
throw;
}
installationContext.Error("Install of '{0}' failed: {1}.", installable, exception);
}
}
}
/// <summary>
/// Uninstalls target-specific objects from current system.
/// </summary>
/// <param name="installationContext">The installation context.</param>
/// <remarks>
/// Uninstallation typically runs with administrative permissions.
/// </remarks>
public void Uninstall(InstallationContext installationContext)
{
if (installationContext == null)
{
throw new ArgumentNullException("installationContext");
}
InitializeAll();
var configItemsList = GetInstallableItems();
foreach (IInstallable installable in configItemsList)
{
installationContext.Info("Uninstalling '{0}'", installable);
try
{
installable.Uninstall(installationContext);
installationContext.Info("Finished uninstalling '{0}'.", installable);
}
catch (Exception exception)
{
InternalLogger.Error(exception, "Uninstall of '{0}' failed.", installable);
if (exception.MustBeRethrownImmediately())
{
throw;
}
installationContext.Error("Uninstall of '{0}' failed: {1}.", installable, exception);
}
}
}
/// <summary>
/// Closes all targets and releases any unmanaged resources.
/// </summary>
internal void Close()
{
InternalLogger.Debug("Closing logging configuration...");
var supportsInitializesList = GetSupportsInitializes();
foreach (ISupportsInitialize initialize in supportsInitializesList)
{
InternalLogger.Trace("Closing {0}", initialize);
try
{
initialize.Close();
}
catch (Exception exception)
{
InternalLogger.Warn(exception, "Exception while closing.");
if (exception.MustBeRethrown())
{
throw;
}
}
}
InternalLogger.Debug("Finished closing logging configuration.");
}
/// <summary>
/// Log to the internal (NLog) logger the information about the <see cref="Target"/> and <see
/// cref="LoggingRule"/> associated with this <see cref="LoggingConfiguration"/> instance.
/// </summary>
/// <remarks>
/// The information are only recorded in the internal logger if Debug level is enabled, otherwise nothing is
/// recorded.
/// </remarks>
internal void Dump()
{
if (!InternalLogger.IsDebugEnabled)
{
return;
}
InternalLogger.Debug("--- NLog configuration dump ---");
InternalLogger.Debug("Targets:");
var targetList = _targets.Values.ToList();
foreach (Target target in targetList)
{
InternalLogger.Debug("{0}", target);
}
InternalLogger.Debug("Rules:");
var loggingRules = LoggingRules.ToList();
foreach (LoggingRule rule in loggingRules)
{
InternalLogger.Debug("{0}", rule);
}
InternalLogger.Debug("--- End of NLog configuration dump ---");
}
/// <summary>
/// Flushes any pending log messages on all appenders.
/// </summary>
/// <param name="asyncContinuation">The asynchronous continuation.</param>
internal void FlushAllTargets(AsyncContinuation asyncContinuation)
{
InternalLogger.Trace("Flushing all targets...");
var uniqueTargets = new List<Target>();
var loggingRules = LoggingRules.ToList();
foreach (var rule in loggingRules)
{
var targetList = rule.Targets.ToList();
foreach (var target in targetList)
{
if (!uniqueTargets.Contains(target))
{
uniqueTargets.Add(target);
}
}
}
AsyncHelpers.ForEachItemInParallel(uniqueTargets, asyncContinuation, (target, cont) => target.Flush(cont));
}
/// <summary>
/// Validates the configuration.
/// </summary>
internal void ValidateConfig()
{
var roots = new List<object>();
var loggingRules = LoggingRules.ToList();
foreach (LoggingRule rule in loggingRules)
{
roots.Add(rule);
}
var targetList = _targets.Values.ToList();
foreach (Target target in targetList)
{
roots.Add(target);
}
_configItems = ObjectGraphScanner.FindReachableObjects<object>(true, roots.ToArray());
// initialize all config items starting from most nested first
// so that whenever the container is initialized its children have already been
InternalLogger.Info("Found {0} configuration items", _configItems.Count);
foreach (object o in _configItems)
{
PropertyHelper.CheckRequiredParameters(o);
}
}
internal void InitializeAll()
{
ValidateConfig();
var supportsInitializes = GetSupportsInitializes(true);
foreach (ISupportsInitialize initialize in supportsInitializes)
{
InternalLogger.Trace("Initializing {0}", initialize);
try
{
initialize.Initialize(this);
}
catch (Exception exception)
{
if (exception.MustBeRethrown())
{
throw;
}
if (LogManager.ThrowExceptions)
{
throw new NLogConfigurationException("Error during initialization of " + initialize, exception);
}
}
}
}
internal void EnsureInitialized()
{
InitializeAll();
}
private List<IInstallable> GetInstallableItems()
{
return _configItems.OfType<IInstallable>().ToList();
}
private List<ISupportsInitialize> GetSupportsInitializes(bool reverse = false)
{
var items = _configItems.OfType<ISupportsInitialize>();
if (reverse)
{
items = items.Reverse();
}
return items.ToList();
}
/// <summary>
/// Copies all variables from provided dictionary into current configuration variables.
/// </summary>
/// <param name="masterVariables">Master variables dictionary</param>
internal void CopyVariables(IDictionary<string, SimpleLayout> masterVariables)
{
foreach (var variable in masterVariables)
{
Variables[variable.Key] = variable.Value;
}
}
}
} | 1 | 16,267 | I think we could replace this (well the backing field) with `BlockingCollection` when we drop NET35? | NLog-NLog | .cs |
@@ -928,6 +928,15 @@ class GroupBy(object):
pser_or_pdf = pdf.groupby(input_groupnames)[name].apply(func)
else:
pser_or_pdf = pdf.groupby(input_groupnames).apply(func)
+ keys = set(input_groupnames)
+ should_drop_columns = (
+ isinstance(pser_or_pdf, pd.DataFrame)
+ and keys.issubset(set(pser_or_pdf.index.names))
+ and keys.issubset(set(pser_or_pdf.columns))
+ )
+ if should_drop_columns:
+ pser_or_pdf = pser_or_pdf.drop(input_groupnames, axis=1)
+
kser_or_kdf = ks.from_pandas(pser_or_pdf)
if len(pdf) <= limit:
return kser_or_kdf | 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A wrapper for GroupedData to behave similarly to pandas GroupBy.
"""
import sys
import inspect
from collections import Callable, OrderedDict, namedtuple
from functools import partial
from itertools import product
from typing import Any, List, Tuple, Union
import numpy as np
import pandas as pd
from pandas._libs.parsers import is_datetime64_dtype
from pandas.core.dtypes.common import is_datetime64tz_dtype
from pyspark.sql import Window, functions as F
from pyspark.sql.types import (
FloatType,
DoubleType,
NumericType,
StructField,
StructType,
StringType,
)
from pyspark.sql.functions import PandasUDFType, pandas_udf, Column
from databricks import koalas as ks # For running doctests and reference resolution in PyCharm.
from databricks.koalas.typedef import _infer_return_type
from databricks.koalas.frame import DataFrame
from databricks.koalas.internal import (
_InternalFrame,
HIDDEN_COLUMNS,
NATURAL_ORDER_COLUMN_NAME,
SPARK_INDEX_NAME_FORMAT,
SPARK_DEFAULT_INDEX_NAME,
)
from databricks.koalas.missing.groupby import (
_MissingPandasLikeDataFrameGroupBy,
_MissingPandasLikeSeriesGroupBy,
)
from databricks.koalas.series import Series, _col
from databricks.koalas.config import get_option
from databricks.koalas.utils import column_labels_level, scol_for, name_like_string
from databricks.koalas.window import RollingGroupby, ExpandingGroupby
# Mirror pandas' ``NamedAgg`` so named-aggregation specs like
# ``agg(b_max=ks.NamedAgg(column='B', aggfunc='max'))`` work identically here.
NamedAgg = namedtuple("NamedAgg", "column aggfunc")
"""
:ivar _kdf: The parent dataframe that is used to perform the groupby
:type _kdf: DataFrame
:ivar _groupkeys: The list of keys that will be used to perform the grouping
:type _groupkeys: List[Series]
"""
# TODO: Series support is not implemented yet.
# TODO: not all arguments are implemented comparing to Pandas' for now.
def aggregate(self, func_or_funcs=None, *args, **kwargs):
"""Aggregate using one or more operations over the specified axis.
Parameters
----------
func_or_funcs : dict, str or list
a dict mapping from column name (string) to
aggregate functions (string or list of strings).
Returns
-------
Series or DataFrame
The return can be:
* Series : when DataFrame.agg is called with a single function
* DataFrame : when DataFrame.agg is called with several functions
Return Series or DataFrame.
Notes
-----
`agg` is an alias for `aggregate`. Use the alias.
See Also
--------
databricks.koalas.Series.groupby
databricks.koalas.DataFrame.groupby
Examples
--------
>>> df = ks.DataFrame({'A': [1, 1, 2, 2],
... 'B': [1, 2, 3, 4],
... 'C': [0.362, 0.227, 1.267, -0.562]},
... columns=['A', 'B', 'C'])
>>> df
A B C
0 1 1 0.362
1 1 2 0.227
2 2 3 1.267
3 2 4 -0.562
Different aggregations per column
>>> aggregated = df.groupby('A').agg({'B': 'min', 'C': 'sum'})
>>> aggregated[['B', 'C']].sort_index() # doctest: +NORMALIZE_WHITESPACE
B C
A
1 1 0.589
2 3 0.705
>>> aggregated = df.groupby('A').agg({'B': ['min', 'max']})
>>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE
B
min max
A
1 1 2
2 3 4
>>> aggregated = df.groupby('A').agg('min')
>>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE
B C
A
1 1 0.227
2 3 -0.562
>>> aggregated = df.groupby('A').agg(['min', 'max'])
>>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE
B C
min max min max
A
1 1 2 0.227 0.362
2 3 4 -0.562 1.267
To control the output names with different aggregations per column, Koalas
also supports 'named aggregation' or nested renaming in .agg. It can also be
used when applying multiple aggregation functions to specific columns.
>>> aggregated = df.groupby('A').agg(b_max=ks.NamedAgg(column='B', aggfunc='max'))
>>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE
b_max
A
1 2
2 4
>>> aggregated = df.groupby('A').agg(b_max=('B', 'max'), b_min=('B', 'min'))
>>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE
b_max b_min
A
1 2 1
2 4 3
>>> aggregated = df.groupby('A').agg(b_max=('B', 'max'), c_min=('C', 'min'))
>>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE
b_max c_min
A
1 2 0.227
2 4 -0.562
"""
# I think current implementation of func and arguments in koalas for aggregate is different
# than pandas, later once arguments are added, this could be removed.
if func_or_funcs is None and kwargs is None:
raise ValueError("No aggregation argument or function specified.")
relabeling = func_or_funcs is None and _is_multi_agg_with_relabel(**kwargs)
if relabeling:
func_or_funcs, columns, order = _normalize_keyword_aggregation(kwargs)
if not isinstance(func_or_funcs, (str, list)):
if not isinstance(func_or_funcs, dict) or not all(
isinstance(key, (str, tuple))
and (
isinstance(value, str)
or isinstance(value, list)
and all(isinstance(v, str) for v in value)
)
for key, value in func_or_funcs.items()
):
raise ValueError(
"aggs must be a dict mapping from column name (string or tuple) "
"to aggregate functions (string or list of strings)."
)
else:
agg_cols = [col.name for col in self._agg_columns]
func_or_funcs = OrderedDict([(col, func_or_funcs) for col in agg_cols])
index_map = OrderedDict(
(SPARK_INDEX_NAME_FORMAT(i), s._internal.column_labels[0])
for i, s in enumerate(self._groupkeys)
)
kdf = DataFrame(
GroupBy._spark_groupby(self._kdf, func_or_funcs, self._groupkeys_scols, index_map)
)
if not self._as_index:
kdf = kdf.reset_index(drop=self._should_drop_index)
if relabeling:
kdf = kdf[order]
kdf.columns = columns
return kdf
agg = aggregate
    @staticmethod
    def _spark_groupby(kdf, func, groupkeys_scols=(), index_map=None):
        # Build and run a Spark ``groupBy(...).agg(...)`` for a mapping of
        # column label -> aggregation function name(s), and wrap the result
        # into an _InternalFrame.
        #
        # ``groupkeys_scols`` and ``index_map`` must be supplied together
        # (grouped aggregation) or both omitted (whole-frame aggregation).
        assert (len(groupkeys_scols) > 0 and index_map is not None) or (
            len(groupkeys_scols) == 0 and index_map is None
        )
        sdf = kdf._sdf
        # Alias each grouping expression to a reserved internal index-column name.
        groupkey_cols = [s.alias(SPARK_INDEX_NAME_FORMAT(i)) for i, s in enumerate(groupkeys_scols)]
        # When any column maps to a *list* of functions, result columns are
        # named "('<col>', '<func>')" so they can later form a MultiIndex.
        multi_aggs = any(isinstance(v, list) for v in func.values())
        reordered = []
        data_columns = []
        column_labels = []
        for key, value in func.items():
            label = key if isinstance(key, tuple) else (key,)
            for aggfunc in [value] if isinstance(value, str) else value:
                name = kdf._internal.spark_column_name_for(label)
                data_col = "('{0}', '{1}')".format(name, aggfunc) if multi_aggs else name
                data_columns.append(data_col)
                column_labels.append(tuple(list(label) + [aggfunc]) if multi_aggs else label)
                if aggfunc == "nunique":
                    # "nunique" has no direct Spark aggregate of that name;
                    # use COUNT(DISTINCT ...) instead.
                    reordered.append(
                        F.expr("count(DISTINCT `{0}`) as `{1}`".format(name, data_col))
                    )
                # Implement "quartiles" aggregate function for ``describe``.
                elif aggfunc == "quartiles":
                    reordered.append(
                        F.expr(
                            "percentile_approx(`{0}`, array(0.25, 0.5, 0.75)) as `{1}`".format(
                                name, data_col
                            )
                        )
                    )
                else:
                    # Generic case: the pandas aggregation name is assumed to
                    # match a Spark SQL aggregate function of the same name.
                    reordered.append(F.expr("{1}(`{0}`) as `{2}`".format(name, aggfunc, data_col)))
        sdf = sdf.groupby(*groupkey_cols).agg(*reordered)
        return _InternalFrame(
            spark_frame=sdf,
            column_labels=column_labels,
            data_spark_columns=[scol_for(sdf, col) for col in data_columns],
            index_map=index_map,
        )
def count(self):
"""
Compute count of group, excluding missing values.
See Also
--------
databricks.koalas.Series.groupby
databricks.koalas.DataFrame.groupby
Examples
--------
>>> df = ks.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B': [np.nan, 2, 3, 4, 5],
... 'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C'])
>>> df.groupby('A').count().sort_index() # doctest: +NORMALIZE_WHITESPACE
B C
A
1 2 3
2 2 2
"""
return self._reduce_for_stat_function(F.count, only_numeric=False)
# TODO: We should fix See Also when Series implementation is finished.
def first(self):
"""
Compute first of group values.
See Also
--------
databricks.koalas.Series.groupby
databricks.koalas.DataFrame.groupby
"""
return self._reduce_for_stat_function(F.first, only_numeric=False)
def last(self):
"""
Compute last of group values.
See Also
--------
databricks.koalas.Series.groupby
databricks.koalas.DataFrame.groupby
"""
return self._reduce_for_stat_function(
lambda col: F.last(col, ignorenulls=True), only_numeric=False
)
def max(self):
"""
Compute max of group values.
See Also
--------
databricks.koalas.Series.groupby
databricks.koalas.DataFrame.groupby
"""
return self._reduce_for_stat_function(F.max, only_numeric=False)
# TODO: examples should be updated.
def mean(self):
"""
Compute mean of groups, excluding missing values.
Returns
-------
koalas.Series or koalas.DataFrame
See Also
--------
databricks.koalas.Series.groupby
databricks.koalas.DataFrame.groupby
Examples
--------
>>> df = ks.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B': [np.nan, 2, 3, 4, 5],
... 'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C'])
Groupby one column and return the mean of the remaining columns in
each group.
>>> df.groupby('A').mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
B C
A
1 3.0 1.333333
2 4.0 1.500000
"""
return self._reduce_for_stat_function(F.mean, only_numeric=True)
def min(self):
"""
Compute min of group values.
See Also
--------
databricks.koalas.Series.groupby
databricks.koalas.DataFrame.groupby
"""
return self._reduce_for_stat_function(F.min, only_numeric=False)
# TODO: sync the doc and implement `ddof`.
def std(self):
"""
Compute standard deviation of groups, excluding missing values.
See Also
--------
databricks.koalas.Series.groupby
databricks.koalas.DataFrame.groupby
"""
return self._reduce_for_stat_function(F.stddev, only_numeric=True)
def sum(self):
"""
Compute sum of group values
See Also
--------
databricks.koalas.Series.groupby
databricks.koalas.DataFrame.groupby
"""
return self._reduce_for_stat_function(F.sum, only_numeric=True)
# TODO: sync the doc and implement `ddof`.
def var(self):
"""
Compute variance of groups, excluding missing values.
See Also
--------
databricks.koalas.Series.groupby
databricks.koalas.DataFrame.groupby
"""
return self._reduce_for_stat_function(F.variance, only_numeric=True)
# TODO: skipna should be implemented.
def all(self):
"""
Returns True if all values in the group are truthful, else False.
See Also
--------
databricks.koalas.Series.groupby
databricks.koalas.DataFrame.groupby
Examples
--------
>>> df = ks.DataFrame({'A': [1, 1, 2, 2, 3, 3, 4, 4, 5, 5],
... 'B': [True, True, True, False, False,
... False, None, True, None, False]},
... columns=['A', 'B'])
>>> df
A B
0 1 True
1 1 True
2 2 True
3 2 False
4 3 False
5 3 False
6 4 None
7 4 True
8 5 None
9 5 False
>>> df.groupby('A').all().sort_index() # doctest: +NORMALIZE_WHITESPACE
B
A
1 True
2 False
3 False
4 True
5 False
"""
return self._reduce_for_stat_function(
lambda col: F.min(F.coalesce(col.cast("boolean"), F.lit(True))), only_numeric=False
)
# TODO: skipna should be implemented.
def any(self):
"""
Returns True if any value in the group is truthful, else False.
See Also
--------
databricks.koalas.Series.groupby
databricks.koalas.DataFrame.groupby
Examples
--------
>>> df = ks.DataFrame({'A': [1, 1, 2, 2, 3, 3, 4, 4, 5, 5],
... 'B': [True, True, True, False, False,
... False, None, True, None, False]},
... columns=['A', 'B'])
>>> df
A B
0 1 True
1 1 True
2 2 True
3 2 False
4 3 False
5 3 False
6 4 None
7 4 True
8 5 None
9 5 False
>>> df.groupby('A').any().sort_index() # doctest: +NORMALIZE_WHITESPACE
B
A
1 True
2 True
3 False
4 True
5 False
"""
return self._reduce_for_stat_function(
lambda col: F.max(F.coalesce(col.cast("boolean"), F.lit(False))), only_numeric=False
)
# TODO: groupby multiply columns should be implemented.
    def size(self):
        """
        Compute group sizes.

        See Also
        --------
        databricks.koalas.Series.groupby
        databricks.koalas.DataFrame.groupby

        Examples
        --------
        >>> df = ks.DataFrame({'A': [1, 2, 2, 3, 3, 3],
        ...                    'B': [1, 1, 2, 3, 3, 3]},
        ...                   columns=['A', 'B'])
        >>> df
           A  B
        0  1  1
        1  2  1
        2  2  2
        3  3  3
        4  3  3
        5  3  3

        >>> df.groupby('A').size().sort_index()  # doctest: +NORMALIZE_WHITESPACE
        A
        1    1
        2    2
        3    3
        Name: count, dtype: int64

        >>> df.groupby(['A', 'B']).size().sort_index()  # doctest: +NORMALIZE_WHITESPACE
        A  B
        1  1    1
        2  1    1
           2    1
        3  3    3
        Name: count, dtype: int64
        """
        groupkeys = self._groupkeys
        # Alias each grouping expression to a reserved internal index-column name.
        groupkey_cols = [
            s.alias(SPARK_INDEX_NAME_FORMAT(i)) for i, s in enumerate(self._groupkeys_scols)
        ]
        sdf = self._kdf._sdf
        # Spark's groupby().count() emits the group size in a column named "count".
        sdf = sdf.groupby(*groupkey_cols).count()
        # When an aggregation column was explicitly selected (SeriesGroupBy-style
        # access), rename "count" after it so the resulting Series keeps that name.
        if (len(self._agg_columns) > 0) and (self._have_agg_columns):
            name = self._agg_columns[0]._internal.data_spark_column_names[0]
            sdf = sdf.withColumnRenamed("count", name)
        else:
            name = "count"
        internal = _InternalFrame(
            spark_frame=sdf,
            index_map=OrderedDict(
                (SPARK_INDEX_NAME_FORMAT(i), s._internal.column_labels[0])
                for i, s in enumerate(groupkeys)
            ),
            data_spark_columns=[scol_for(sdf, name)],
        )
        # _col extracts the single column as a koalas Series.
        return _col(DataFrame(internal))
def diff(self, periods=1):
"""
First discrete difference of element.
Calculates the difference of a DataFrame element compared with another element in the
DataFrame group (default is the element in the same column of the previous row).
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference, accepts negative values.
Returns
-------
diffed : DataFrame or Series
See Also
--------
databricks.koalas.Series.groupby
databricks.koalas.DataFrame.groupby
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c'])
>>> df
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> df.groupby(['b']).diff().sort_index()
a c
0 NaN NaN
1 1.0 3.0
2 NaN NaN
3 NaN NaN
4 NaN NaN
5 NaN NaN
Difference with previous column in a group.
>>> df.groupby(['b'])['a'].diff().sort_index()
0 NaN
1 1.0
2 NaN
3 NaN
4 NaN
5 NaN
Name: a, dtype: float64
"""
return self._apply_series_op(
lambda sg: sg._kser._diff(periods, part_cols=sg._groupkeys_scols)
)
def cummax(self):
"""
Cumulative max for each group.
Returns
-------
Series or DataFrame
See Also
--------
Series.cummax
DataFrame.cummax
Examples
--------
>>> df = ks.DataFrame(
... [[1, None, 4], [1, 0.1, 3], [1, 20.0, 2], [4, 10.0, 1]],
... columns=list('ABC'))
>>> df
A B C
0 1 NaN 4
1 1 0.1 3
2 1 20.0 2
3 4 10.0 1
By default, iterates over rows and finds the sum in each column.
>>> df.groupby("A").cummax().sort_index()
B C
0 NaN 4
1 0.1 4
2 20.0 4
3 10.0 1
It works as below in Series.
>>> df.C.groupby(df.A).cummax().sort_index()
0 4
1 4
2 4
3 1
Name: C, dtype: int64
"""
return self._apply_series_op(
lambda sg: sg._kser._cum(F.max, True, part_cols=sg._groupkeys_scols)
)
def cummin(self):
"""
Cumulative min for each group.
Returns
-------
Series or DataFrame
See Also
--------
Series.cummin
DataFrame.cummin
Examples
--------
>>> df = ks.DataFrame(
... [[1, None, 4], [1, 0.1, 3], [1, 20.0, 2], [4, 10.0, 1]],
... columns=list('ABC'))
>>> df
A B C
0 1 NaN 4
1 1 0.1 3
2 1 20.0 2
3 4 10.0 1
By default, iterates over rows and finds the sum in each column.
>>> df.groupby("A").cummin().sort_index()
B C
0 NaN 4
1 0.1 3
2 0.1 2
3 10.0 1
It works as below in Series.
>>> df.B.groupby(df.A).cummin().sort_index()
0 NaN
1 0.1
2 0.1
3 10.0
Name: B, dtype: float64
"""
return self._apply_series_op(
lambda sg: sg._kser._cum(F.min, True, part_cols=sg._groupkeys_scols)
)
def cumprod(self):
"""
Cumulative product for each group.
Returns
-------
Series or DataFrame
See Also
--------
Series.cumprod
DataFrame.cumprod
Examples
--------
>>> df = ks.DataFrame(
... [[1, None, 4], [1, 0.1, 3], [1, 20.0, 2], [4, 10.0, 1]],
... columns=list('ABC'))
>>> df
A B C
0 1 NaN 4
1 1 0.1 3
2 1 20.0 2
3 4 10.0 1
By default, iterates over rows and finds the sum in each column.
>>> df.groupby("A").cumprod().sort_index()
B C
0 NaN 4.0
1 0.1 12.0
2 2.0 24.0
3 10.0 1.0
It works as below in Series.
>>> df.B.groupby(df.A).cumprod().sort_index()
0 NaN
1 0.1
2 2.0
3 10.0
Name: B, dtype: float64
"""
return self._apply_series_op(
lambda sg: sg._kser._cumprod(True, part_cols=sg._groupkeys_scols)
)
def cumsum(self):
"""
Cumulative sum for each group.
Returns
-------
Series or DataFrame
See Also
--------
Series.cumsum
DataFrame.cumsum
Examples
--------
>>> df = ks.DataFrame(
... [[1, None, 4], [1, 0.1, 3], [1, 20.0, 2], [4, 10.0, 1]],
... columns=list('ABC'))
>>> df
A B C
0 1 NaN 4
1 1 0.1 3
2 1 20.0 2
3 4 10.0 1
By default, iterates over rows and finds the sum in each column.
>>> df.groupby("A").cumsum().sort_index()
B C
0 NaN 4
1 0.1 7
2 20.1 9
3 10.0 1
It works as below in Series.
>>> df.B.groupby(df.A).cumsum().sort_index()
0 NaN
1 0.1
2 20.1
3 10.0
Name: B, dtype: float64
"""
return self._apply_series_op(
lambda sg: sg._kser._cum(F.sum, True, part_cols=sg._groupkeys_scols)
)
def apply(self, func):
"""
Apply function `func` group-wise and combine the results together.
The function passed to `apply` must take a DataFrame as its first
argument and return a DataFrame. `apply` will
then take care of combining the results back together into a single
dataframe. `apply` is therefore a highly flexible
grouping method.
While `apply` is a very flexible method, its downside is that
using it can be quite a bit slower than using more specific methods
like `agg` or `transform`. Koalas offers a wide range of method that will
be much faster than using `apply` for their specific purposes, so try to
use them before reaching for `apply`.
.. note:: this API executes the function once to infer the type which is
potentially expensive, for instance, when the dataset is created after
aggregations or sorting.
To avoid this, specify return type in ``func``, for instance, as below:
>>> def pandas_div(x) -> ks.DataFrame[float, float]:
... return x[['B', 'C']] / x[['B', 'C']]
If the return type is specified, the output column names become
`c0, c1, c2 ... cn`. These names are positionally mapped to the returned
DataFrame in ``func``. See examples below.
.. note:: the dataframe within ``func`` is actually a pandas dataframe. Therefore,
any pandas APIs within this function is allowed.
Parameters
----------
func : callable
A callable that takes a DataFrame as its first argument, and
returns a dataframe.
Returns
-------
applied : DataFrame or Series
See Also
--------
aggregate : Apply aggregate function to the GroupBy object.
DataFrame.apply : Apply a function to a DataFrame.
Series.apply : Apply a function to a Series.
Examples
--------
>>> df = ks.DataFrame({'A': 'a a b'.split(),
... 'B': [1, 2, 3],
... 'C': [4, 6, 5]}, columns=['A', 'B', 'C'])
>>> g = df.groupby('A')
Notice that ``g`` has two groups, ``a`` and ``b``.
Calling `apply` in various ways, we can get different grouping results:
Below the functions passed to `apply` takes a DataFrame as
its argument and returns a DataFrame. `apply` combines the result for
each group together into a new DataFrame:
>>> def plus_min(x):
... return x + x.min()
>>> g.apply(plus_min).sort_index() # doctest: +NORMALIZE_WHITESPACE
A B C
0 aa 2 8
1 aa 3 10
2 bb 6 10
You can specify the type hint and prevent schema inference for better performance.
>>> def pandas_div(x) -> ks.DataFrame[float, float]:
... return x[['B', 'C']] / x[['B', 'C']]
>>> g.apply(pandas_div).sort_index() # doctest: +NORMALIZE_WHITESPACE
c0 c1
0 1.0 1.0
1 1.0 1.0
2 1.0 1.0
>>> def pandas_length(x) -> int:
... return len(x)
>>> g.apply(pandas_length).sort_index() # doctest: +NORMALIZE_WHITESPACE
0 1
1 2
Name: 0, dtype: int32
In case of Series, it works as below.
>>> def plus_max(x) -> ks.Series[np.int]:
... return x + x.max()
>>> df.B.groupby(df.A).apply(plus_max).sort_index()
0 6
1 3
2 4
Name: B, dtype: int32
>>> def plus_min(x):
... return x + x.min()
>>> df.B.groupby(df.A).apply(plus_min).sort_index()
0 2
1 3
2 6
Name: B, dtype: int64
You can also return a scalar value as a aggregated value of the group:
>>> def plus_max(x) -> np.int:
... return len(x)
>>> df.B.groupby(df.A).apply(plus_max).sort_index()
0 1
1 2
Name: B, dtype: int32
"""
if not isinstance(func, Callable):
raise TypeError("%s object is not callable" % type(func))
spec = inspect.getfullargspec(func)
return_sig = spec.annotations.get("return", None)
should_infer_schema = return_sig is None
input_groupnames = [s.name for s in self._groupkeys]
should_return_series = False
is_series_groupby = isinstance(self, SeriesGroupBy)
if is_series_groupby:
name = self._kser.name
if should_infer_schema:
# Here we execute with the first 1000 to get the return type.
limit = get_option("compute.shortcut_limit")
pdf = self._kdf.head(limit + 1)._to_internal_pandas()
if is_series_groupby:
pser_or_pdf = pdf.groupby(input_groupnames)[name].apply(func)
else:
pser_or_pdf = pdf.groupby(input_groupnames).apply(func)
kser_or_kdf = ks.from_pandas(pser_or_pdf)
if len(pdf) <= limit:
return kser_or_kdf
kdf = kser_or_kdf
if isinstance(kser_or_kdf, ks.Series):
should_return_series = True
kdf = kser_or_kdf.to_frame()
return_schema = kdf._sdf.drop(*HIDDEN_COLUMNS).schema
else:
if not is_series_groupby and getattr(return_sig, "__origin__", None) == ks.Series:
raise TypeError(
"Series as a return type hint at frame groupby is not supported "
"currently; however got [%s]. Use DataFrame type hint instead." % return_sig
)
return_schema = _infer_return_type(func).tpe
if not isinstance(return_schema, StructType):
should_return_series = True
if is_series_groupby:
return_schema = StructType([StructField(name, return_schema)])
else:
return_schema = StructType([StructField("0", return_schema)])
def pandas_groupby_apply(pdf):
if is_series_groupby:
pdf_or_ser = pdf.groupby(input_groupnames)[name].apply(func)
else:
pdf_or_ser = pdf.groupby(input_groupnames).apply(func)
if not isinstance(pdf_or_ser, pd.DataFrame):
return pd.DataFrame(pdf_or_ser)
else:
return pdf_or_ser
sdf = GroupBy._spark_group_map_apply(
self._kdf,
pandas_groupby_apply,
self._groupkeys_scols,
return_schema,
retain_index=should_infer_schema,
)
if should_infer_schema:
# If schema is inferred, we can restore indexes too.
internal = kdf._internal.with_new_sdf(sdf)
else:
# Otherwise, it loses index.
internal = _InternalFrame(spark_frame=sdf, index_map=None)
if should_return_series:
return _col(DataFrame(internal))
else:
return DataFrame(internal)
# TODO: implement 'dropna' parameter
def filter(self, func):
"""
Return a copy of a DataFrame excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
f : function
Function to apply to each subframe. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Returns
-------
filtered : DataFrame
Notes
-----
Each subframe is endowed the attribute 'name' in case you need to know
which group you are working on.
Examples
--------
>>> df = ks.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]}, columns=['A', 'B', 'C'])
>>> grouped = df.groupby('A')
>>> grouped.filter(lambda x: x['B'].mean() > 3.)
A B C
1 bar 2 5.0
3 bar 4 1.0
5 bar 6 9.0
"""
if not isinstance(func, Callable):
raise TypeError("%s object is not callable" % type(func))
data_schema = self._kdf._sdf.drop(*HIDDEN_COLUMNS).schema
groupby_names = [s.name for s in self._groupkeys]
def pandas_filter(pdf):
return pdf.groupby(groupby_names).filter(func)
sdf = GroupBy._spark_group_map_apply(
self._kdf, pandas_filter, self._groupkeys_scols, data_schema, retain_index=True
)
return DataFrame(self._kdf._internal.with_new_sdf(sdf))
    @staticmethod
    def _spark_group_map_apply(kdf, func, groupkeys_scols, return_schema, retain_index):
        # Execute ``func`` against each group of ``kdf`` as a pandas
        # GROUPED_MAP UDF and return the resulting Spark DataFrame.
        #
        # ``retain_index`` controls whether the pandas index produced by
        # ``func`` is materialized back into columns of the output.
        index_columns = kdf._internal.index_spark_column_names
        index_names = kdf._internal.index_names
        data_columns = kdf._internal.data_spark_column_names
        column_labels = kdf._internal.column_labels

        def rename_output(pdf):
            # Wraps ``func``: first reconstruct the pandas index/column labels
            # the caller expects, then run ``func``, then flatten the result
            # back to plain columns matching ``return_schema``.
            # TODO: This logic below was borrowed from `DataFrame.to_pandas_frame` to set the index
            # within each pdf properly. we might have to deduplicate it.
            import pandas as pd

            if len(index_columns) > 0:
                append = False
                for index_field in index_columns:
                    drop = index_field not in data_columns
                    pdf = pdf.set_index(index_field, drop=drop, append=append)
                    append = True
                pdf = pdf[data_columns]

            if column_labels_level(column_labels) > 1:
                pdf.columns = pd.MultiIndex.from_tuples(column_labels)
            else:
                pdf.columns = [None if label is None else label[0] for label in column_labels]
            if len(index_names) > 0:
                pdf.index.names = [
                    name if name is None or len(name) > 1 else name[0] for name in index_names
                ]

            pdf = func(pdf)

            if retain_index:
                # If schema should be inferred, we don't restore index. Pandas seems restoring
                # the index in some cases.
                # When Spark output type is specified, without executing it, we don't know
                # if we should restore the index or not. For instance, see the example in
                # https://github.com/databricks/koalas/issues/628.
                # TODO: deduplicate this logic with _InternalFrame.from_pandas
                columns = pdf.columns
                index = pdf.index

                index_map = []
                if isinstance(index, pd.MultiIndex):
                    if index.names is None:
                        index_map = [
                            (SPARK_INDEX_NAME_FORMAT(i), None) for i in range(len(index.levels))
                        ]
                    else:
                        index_map = [
                            (SPARK_INDEX_NAME_FORMAT(i) if name is None else name, name)
                            for i, name in enumerate(index.names)
                        ]
                else:
                    index_map = [
                        (
                            index.name if index.name is not None else SPARK_DEFAULT_INDEX_NAME,
                            index.name,
                        )
                    ]

                new_index_columns = [index_column for index_column, _ in index_map]
                new_data_columns = [str(col) for col in columns]

                reset_index = pdf.reset_index()
                reset_index.columns = new_index_columns + new_data_columns
                for name, col in reset_index.iteritems():
                    dt = col.dtype
                    if is_datetime64_dtype(dt) or is_datetime64tz_dtype(dt):
                        continue
                    # Replace np.nan with None for non-datetime columns so Spark
                    # does not misinterpret NaN in non-float types.
                    reset_index[name] = col.replace({np.nan: None})
                pdf = reset_index

            # Just positionally map the column names to given schema's.
            pdf = pdf.rename(columns=dict(zip(pdf.columns, return_schema.fieldNames())))

            return pdf

        grouped_map_func = pandas_udf(return_schema, PandasUDFType.GROUPED_MAP)(rename_output)

        sdf = kdf._sdf.drop(*HIDDEN_COLUMNS)
        input_groupkeys = [s for s in groupkeys_scols]
        sdf = sdf.groupby(*input_groupkeys).apply(grouped_map_func)
        return sdf
def rank(self, method="average", ascending=True):
"""
Provide the rank of values within each group.
Parameters
----------
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
* average: average rank of group
* min: lowest rank in group
* max: highest rank in group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
ascending : boolean, default True
False for ranks by high (1) to low (N)
Returns
-------
DataFrame with ranking of values within each group
Examples
--------
>>> df = ks.DataFrame({
... 'a': [1, 1, 1, 2, 2, 2, 3, 3, 3],
... 'b': [1, 2, 2, 2, 3, 3, 3, 4, 4]}, columns=['a', 'b'])
>>> df
a b
0 1 1
1 1 2
2 1 2
3 2 2
4 2 3
5 2 3
6 3 3
7 3 4
8 3 4
>>> df.groupby("a").rank().sort_index()
b
0 1.0
1 2.5
2 2.5
3 1.0
4 2.5
5 2.5
6 1.0
7 2.5
8 2.5
>>> df.b.groupby(df.a).rank(method='max').sort_index()
0 1.0
1 3.0
2 3.0
3 1.0
4 3.0
5 3.0
6 1.0
7 3.0
8 3.0
Name: b, dtype: float64
"""
return self._apply_series_op(
lambda sg: sg._kser._rank(method, ascending, part_cols=sg._groupkeys_scols)
)
# TODO: add axis parameter
    def idxmax(self, skipna=True):
        """
        Return index of first occurrence of maximum over requested axis in group.
        NA/null values are excluded.

        Parameters
        ----------
        skipna : boolean, default True
            Exclude NA/null values. If an entire row/column is NA, the result will be NA.

        See Also
        --------
        Series.idxmax
        DataFrame.idxmax
        databricks.koalas.Series.groupby
        databricks.koalas.DataFrame.groupby

        Examples
        --------
        >>> df = ks.DataFrame({'a': [1, 1, 2, 2, 3],
        ...                    'b': [1, 2, 3, 4, 5],
        ...                    'c': [5, 4, 3, 2, 1]}, columns=['a', 'b', 'c'])

        >>> df.groupby(['a'])['b'].idxmax().sort_index() # doctest: +NORMALIZE_WHITESPACE
        a
        1    1
        2    3
        3    4
        Name: b, dtype: int64

        >>> df.groupby(['a']).idxmax().sort_index() # doctest: +NORMALIZE_WHITESPACE
           b  c
        a
        1  1  0
        2  3  2
        3  4  4
        """
        # Multi-level indexes are unsupported: the implementation below reads
        # exactly one index column per row.
        if len(self._kdf._internal.index_names) != 1:
            raise ValueError("idxmax only support one-level index now")
        groupkeys = self._groupkeys
        groupkey_cols = [
            s.alias(SPARK_INDEX_NAME_FORMAT(i)) for i, s in enumerate(self._groupkeys_scols)
        ]
        sdf = self._kdf._sdf
        index = self._kdf._internal.index_spark_column_names[0]
        stat_exprs = []
        for kser, c in zip(self._agg_columns, self._agg_columns_scols):
            name = kser._internal.data_spark_column_names[0]
            if skipna:
                # Nulls sort last under descending order, so a null can never
                # win the arg-max when skipna is requested.
                order_column = Column(c._jc.desc_nulls_last())
            else:
                order_column = Column(c._jc.desc_nulls_first())
            # NATURAL_ORDER_COLUMN_NAME breaks ties by original row order,
            # matching pandas' "first occurrence" semantics.
            window = Window.partitionBy(groupkey_cols).orderBy(
                order_column, NATURAL_ORDER_COLUMN_NAME
            )
            # Mark the winning row of each group with its index value; all
            # other rows become null, so F.max below picks the winner.
            sdf = sdf.withColumn(
                name, F.when(F.row_number().over(window) == 1, scol_for(sdf, index)).otherwise(None)
            )
            stat_exprs.append(F.max(scol_for(sdf, name)).alias(name))
        sdf = sdf.groupby(*groupkey_cols).agg(*stat_exprs)
        internal = _InternalFrame(
            spark_frame=sdf,
            index_map=OrderedDict(
                (SPARK_INDEX_NAME_FORMAT(i), s._internal.column_labels[0])
                for i, s in enumerate(groupkeys)
            ),
            column_labels=[kser._internal.column_labels[0] for kser in self._agg_columns],
            data_spark_columns=[
                scol_for(sdf, kser._internal.data_spark_column_names[0])
                for kser in self._agg_columns
            ],
        )
        return DataFrame(internal)
# TODO: add axis parameter
    def idxmin(self, skipna=True):
        """
        Return index of first occurrence of minimum over requested axis in group.
        NA/null values are excluded.

        Parameters
        ----------
        skipna : boolean, default True
            Exclude NA/null values. If an entire row/column is NA, the result will be NA.

        See Also
        --------
        Series.idxmin
        DataFrame.idxmin
        databricks.koalas.Series.groupby
        databricks.koalas.DataFrame.groupby

        Examples
        --------
        >>> df = ks.DataFrame({'a': [1, 1, 2, 2, 3],
        ...                    'b': [1, 2, 3, 4, 5],
        ...                    'c': [5, 4, 3, 2, 1]}, columns=['a', 'b', 'c'])

        >>> df.groupby(['a'])['b'].idxmin().sort_index() # doctest: +NORMALIZE_WHITESPACE
        a
        1    0
        2    2
        3    4
        Name: b, dtype: int64

        >>> df.groupby(['a']).idxmin().sort_index() # doctest: +NORMALIZE_WHITESPACE
           b  c
        a
        1  0  1
        2  2  3
        3  4  4
        """
        # Multi-level indexes are unsupported: the implementation below reads
        # exactly one index column per row.
        if len(self._kdf._internal.index_names) != 1:
            raise ValueError("idxmin only support one-level index now")
        groupkeys = self._groupkeys
        groupkey_cols = [
            s.alias(SPARK_INDEX_NAME_FORMAT(i)) for i, s in enumerate(self._groupkeys_scols)
        ]
        sdf = self._kdf._sdf
        index = self._kdf._internal.index_spark_column_names[0]
        stat_exprs = []
        for kser, c in zip(self._agg_columns, self._agg_columns_scols):
            name = kser._internal.data_spark_column_names[0]
            if skipna:
                # Nulls sort last under ascending order, so a null can never
                # win the arg-min when skipna is requested.
                order_column = Column(c._jc.asc_nulls_last())
            else:
                order_column = Column(c._jc.asc_nulls_first())
            # NATURAL_ORDER_COLUMN_NAME breaks ties by original row order,
            # matching pandas' "first occurrence" semantics.
            window = Window.partitionBy(groupkey_cols).orderBy(
                order_column, NATURAL_ORDER_COLUMN_NAME
            )
            # Mark the winning row of each group with its index value; all
            # other rows become null, so F.max below picks the winner.
            sdf = sdf.withColumn(
                name, F.when(F.row_number().over(window) == 1, scol_for(sdf, index)).otherwise(None)
            )
            stat_exprs.append(F.max(scol_for(sdf, name)).alias(name))
        sdf = sdf.groupby(*groupkey_cols).agg(*stat_exprs)
        internal = _InternalFrame(
            spark_frame=sdf,
            index_map=OrderedDict(
                (SPARK_INDEX_NAME_FORMAT(i), s._internal.column_labels[0])
                for i, s in enumerate(groupkeys)
            ),
            column_labels=[kser._internal.column_labels[0] for kser in self._agg_columns],
            data_spark_columns=[
                scol_for(sdf, kser._internal.data_spark_column_names[0])
                for kser in self._agg_columns
            ],
        )
        return DataFrame(internal)
def fillna(self, value=None, method=None, axis=None, inplace=False, limit=None):
"""Fill NA/NaN values in group.
Parameters
----------
value : scalar, dict, Series
Value to use to fill holes. alternately a dict/Series of values
specifying which value to use for each column.
DataFrame is not supported.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series pad / ffill: propagate last valid
observation forward to next valid backfill / bfill:
use NEXT valid observation to fill gap
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame
DataFrame with NA entries filled.
Examples
--------
>>> df = ks.DataFrame({
... 'A': [1, 1, 2, 2],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 1 2.0 NaN 0
1 1 4.0 NaN 1
2 2 NaN NaN 5
3 2 3.0 1.0 4
We can also propagate non-null values forward or backward in group.
>>> df.groupby(['A'])['B'].fillna(method='ffill').sort_index()
0 2.0
1 4.0
2 NaN
3 3.0
Name: B, dtype: float64
>>> df.groupby(['A']).fillna(method='bfill').sort_index()
B C D
0 2.0 NaN 0
1 4.0 NaN 1
2 3.0 1.0 5
3 3.0 1.0 4
"""
return self._fillna(value, method, axis, inplace, limit)
def bfill(self, limit=None):
"""
Synonym for `DataFrame.fillna()` with ``method=`bfill```.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame
DataFrame with NA entries filled.
Examples
--------
>>> df = ks.DataFrame({
... 'A': [1, 1, 2, 2],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 1 2.0 NaN 0
1 1 4.0 NaN 1
2 2 NaN NaN 5
3 2 3.0 1.0 4
Propagate non-null values backward.
>>> df.groupby(['A']).bfill().sort_index()
B C D
0 2.0 NaN 0
1 4.0 NaN 1
2 3.0 1.0 5
3 3.0 1.0 4
"""
return self._fillna(method="bfill", limit=limit)
backfill = bfill
def ffill(self, limit=None):
    """
    Synonym for `DataFrame.fillna()` with ``method=`ffill```.

    Parameters
    ----------
    limit : int, default None
        Maximum number of consecutive NaN values to forward fill. If there is
        a gap with more than this number of consecutive NaNs, it will only be
        partially filled. Must be greater than 0 if not None.

    Returns
    -------
    DataFrame
        DataFrame with NA entries filled.

    Examples
    --------
    >>> df = ks.DataFrame({
    ...     'A': [1, 1, 2, 2],
    ...     'B': [2, 4, None, 3],
    ...     'C': [None, None, None, 1],
    ...     'D': [0, 1, 5, 4]
    ...     },
    ...     columns=['A', 'B', 'C', 'D'])
    >>> df
       A    B    C  D
    0  1  2.0  NaN  0
    1  1  4.0  NaN  1
    2  2  NaN  NaN  5
    3  2  3.0  1.0  4

    Propagate non-null values forward.

    >>> df.groupby(['A']).ffill().sort_index()
         B    C  D
    0  2.0  NaN  0
    1  4.0  NaN  1
    2  NaN  NaN  5
    3  3.0  1.0  4
    """
    # Delegate to the shared per-group fillna implementation with a fixed method.
    return self._fillna(method="ffill", limit=limit)

# pandas-compatible alias.
pad = ffill
def head(self, n=5):
    """
    Return first n rows of each group.

    Returns
    -------
    DataFrame or Series

    Examples
    --------
    >>> df = ks.DataFrame({'a': [1, 1, 1, 1, 2, 2, 2, 3, 3, 3],
    ...                    'b': [2, 3, 1, 4, 6, 9, 8, 10, 7, 5],
    ...                    'c': [3, 5, 2, 5, 1, 2, 6, 4, 3, 6]},
    ...                   columns=['a', 'b', 'c'],
    ...                   index=[7, 2, 4, 1, 3, 4, 9, 10, 5, 6])
    >>> df
        a   b  c
    7   1   2  3
    2   1   3  5
    4   1   1  2
    1   1   4  5
    3   2   6  1
    4   2   9  2
    9   2   8  6
    10  3  10  4
    5   3   7  3
    6   3   5  6

    >>> df.groupby('a').head(2).sort_index()
        a   b  c
    2   1   3  5
    3   2   6  1
    4   2   9  2
    5   3   7  3
    7   1   2  3
    10  3  10  4

    >>> df.groupby('a')['b'].head(2).sort_index()
    2      3
    3      6
    4      9
    5      7
    7      2
    10    10
    Name: b, dtype: int64
    """
    # Number the rows inside each group in the frame's natural order, then
    # keep only the first n per group.
    row_number_col = "__row_number__"
    win = Window.partitionBy(self._groupkeys_scols).orderBy(NATURAL_ORDER_COLUMN_NAME)
    numbered = self._kdf._sdf.withColumn(row_number_col, F.row_number().over(win))
    limited = numbered.filter(F.col(row_number_col) <= n).drop(row_number_col)
    return DataFrame(self._kdf._internal.with_new_sdf(limited))
def shift(self, periods=1, fill_value=None):
    """
    Shift each group by periods observations.

    Parameters
    ----------
    periods : integer, default 1
        number of periods to shift
    fill_value : optional
        The scalar value to use for newly introduced missing values.

    Returns
    -------
    Series or DataFrame
        Object shifted within each group.

    Examples
    --------
    >>> df = ks.DataFrame({
    ...     'a': [1, 1, 1, 2, 2, 2, 3, 3, 3],
    ...     'b': [1, 2, 2, 2, 3, 3, 3, 4, 4]}, columns=['a', 'b'])
    >>> df
       a  b
    0  1  1
    1  1  2
    2  1  2
    3  2  2
    4  2  3
    5  2  3
    6  3  3
    7  3  4
    8  3  4

    >>> df.groupby('a').shift().sort_index()  # doctest: +SKIP
         b
    0  NaN
    1  1.0
    2  2.0
    3  NaN
    4  2.0
    5  3.0
    6  NaN
    7  3.0
    8  4.0

    >>> df.groupby('a').shift(periods=-1, fill_value=0).sort_index()  # doctest: +SKIP
       b
    0  2
    1  2
    2  0
    3  3
    4  3
    5  0
    6  4
    7  4
    8  0
    """
    # Shift each aggregation column independently, partitioned by the group keys.
    return self._apply_series_op(
        lambda sg: sg._kser._shift(periods, fill_value, part_cols=sg._groupkeys_scols)
    )
def transform(self, func):
    """
    Apply function column-by-column to the GroupBy object.

    The function passed to `transform` must take a Series as its first
    argument and return a Series. The given function is executed for
    each series in each grouped data.

    While `transform` is a very flexible method, its downside is that
    using it can be quite a bit slower than using more specific methods
    like `agg` or `transform`. Koalas offers a wide range of method that will
    be much faster than using `transform` for their specific purposes, so try to
    use them before reaching for `transform`.

    .. note:: this API executes the function once to infer the type which is
         potentially expensive, for instance, when the dataset is created after
         aggregations or sorting.

         To avoid this, specify return type in ``func``, for instance, as below:

         >>> def convert_to_string(x) -> ks.Series[str]:
         ...     return x.apply("a string {}".format)

    .. note:: the series within ``func`` is actually a pandas series. Therefore,
        any pandas APIs within this function is allowed.

    Parameters
    ----------
    func : callable
        A callable that takes a Series as its first argument, and
        returns a Series.

    Returns
    -------
    applied : DataFrame

    See Also
    --------
    aggregate : Apply aggregate function to the GroupBy object.
    Series.apply : Apply a function to a Series.

    Examples
    --------
    >>> df = ks.DataFrame({'A': [0, 0, 1],
    ...                    'B': [1, 2, 3],
    ...                    'C': [4, 6, 5]}, columns=['A', 'B', 'C'])

    >>> g = df.groupby('A')

    Notice that ``g`` has two groups, ``0`` and ``1``.
    Calling `transform` in various ways, we can get different grouping results:
    Below the functions passed to `transform` takes a Series as
    its argument and returns a Series. `transform` applies the function on each series
    in each grouped data, and combine them into a new DataFrame:

    >>> def convert_to_string(x) -> ks.Series[str]:
    ...     return x.apply("a string {}".format)
    >>> g.transform(convert_to_string)  # doctest: +NORMALIZE_WHITESPACE
                B           C
    0  a string 1  a string 4
    1  a string 2  a string 6
    2  a string 3  a string 5

    >>> def plus_max(x) -> ks.Series[np.int]:
    ...     return x + x.max()
    >>> g.transform(plus_max)  # doctest: +NORMALIZE_WHITESPACE
       B   C
    0  3  10
    1  4  12
    2  6  10

    You can omit the type hint and let Koalas infer its type.

    >>> def plus_min(x):
    ...     return x + x.min()
    >>> g.transform(plus_min)  # doctest: +NORMALIZE_WHITESPACE
       B   C
    0  2   8
    1  3  10
    2  6  10

    In case of Series, it works as below.

    >>> df.B.groupby(df.A).transform(plus_max)
    0    3
    1    4
    2    6
    Name: B, dtype: int32

    >>> df.B.groupby(df.A).transform(plus_min)
    0    2
    1    3
    2    6
    Name: B, dtype: int64
    """
    # Use the builtin callable() instead of isinstance(func, typing.Callable):
    # it is the idiomatic check and does not rely on typing's runtime
    # isinstance support, which is deprecated for typing constructs.
    if not callable(func):
        raise TypeError("%s object is not callable" % type(func))

    spec = inspect.getfullargspec(func)
    return_sig = spec.annotations.get("return", None)
    input_groupnames = [s.name for s in self._groupkeys]

    def pandas_transform(pdf):
        # pandas GroupBy.transform drops grouping columns.
        pdf = pdf.drop(columns=input_groupnames)
        return pdf.transform(func)

    # When the user gave no return-type annotation, the schema has to be
    # inferred by actually running the function on a sample.
    should_infer_schema = return_sig is None

    if should_infer_schema:
        # Here we execute with the first 1000 to get the return type.
        # If the records were less than 1000, it uses pandas API directly for a shortcut.
        limit = get_option("compute.shortcut_limit")
        pdf = self._kdf.head(limit + 1)._to_internal_pandas()
        pdf = pdf.groupby(input_groupnames).transform(func)
        kdf = DataFrame(pdf)
        return_schema = kdf._sdf.drop(*HIDDEN_COLUMNS).schema
        if len(pdf) <= limit:
            # The whole frame fit in the sample: the pandas result is final.
            return kdf

        sdf = GroupBy._spark_group_map_apply(
            self._kdf, pandas_transform, self._groupkeys_scols, return_schema, retain_index=True
        )
        # If schema is inferred, we can restore indexes too.
        internal = kdf._internal.with_new_sdf(sdf)
    else:
        # The annotated return type applies to every non-grouping column.
        return_type = _infer_return_type(func).tpe
        data_columns = self._kdf._internal.data_spark_column_names
        return_schema = StructType(
            [StructField(c, return_type) for c in data_columns if c not in input_groupnames]
        )

        sdf = GroupBy._spark_group_map_apply(
            self._kdf,
            pandas_transform,
            self._groupkeys_scols,
            return_schema,
            retain_index=False,
        )
        # Otherwise, it loses index.
        internal = _InternalFrame(spark_frame=sdf, index_map=None)

    return DataFrame(internal)
def nunique(self, dropna=True):
    """
    Return DataFrame with number of distinct observations per group for each column.

    Parameters
    ----------
    dropna : boolean, default True
        Don’t include NaN in the counts.

    Returns
    -------
    nunique : DataFrame

    Examples
    --------
    >>> df = ks.DataFrame({'id': ['spam', 'egg', 'egg', 'spam',
    ...                           'ham', 'ham'],
    ...                    'value1': [1, 5, 5, 2, 5, 5],
    ...                    'value2': list('abbaxy')}, columns=['id', 'value1', 'value2'])
    >>> df
         id  value1 value2
    0  spam       1      a
    1   egg       5      b
    2   egg       5      b
    3  spam       2      a
    4   ham       5      x
    5   ham       5      y

    >>> df.groupby('id').nunique().sort_index() # doctest: +NORMALIZE_WHITESPACE
          id  value1  value2
    id
    egg    1       1       1
    ham    1       1       2
    spam   1       2       1

    >>> df.groupby('id')['value1'].nunique().sort_index() # doctest: +NORMALIZE_WHITESPACE
    id
    egg     1
    ham     1
    spam    2
    Name: value1, dtype: int64
    """
    if isinstance(self, DataFrameGroupBy):
        # NOTE(review): this mutates self._agg_columns in place, so calling
        # nunique() twice on the same GroupBy object would prepend the group
        # keys again — verify whether this accumulation is intended upstream.
        self._agg_columns = self._groupkeys + self._agg_columns
        self._agg_columns_scols = self._groupkeys_scols + self._agg_columns_scols
    if dropna:
        stat_function = lambda col: F.countDistinct(col)
    else:
        # countDistinct ignores nulls, so add 1 back if the column contains
        # at least one null within the group.
        stat_function = lambda col: (
            F.countDistinct(col)
            + F.when(F.count(F.when(col.isNull(), 1).otherwise(None)) >= 1, 1).otherwise(0)
        )
    return self._reduce_for_stat_function(stat_function, only_numeric=False)
def rolling(self, window, min_periods=None):
    """
    Return an rolling grouper, providing rolling
    functionality per group.

    .. note:: 'min_periods' in Koalas works as a fixed window size unlike pandas.
        Unlike pandas, NA is also counted as the period. This might be changed
        in the near future.

    Parameters
    ----------
    window : int, or offset
        Size of the moving window.
        This is the number of observations used for calculating the statistic.
        Each window will be a fixed size.
    min_periods : int, default 1
        Minimum number of observations in window required to have a value
        (otherwise result is NA).

    Returns
    -------
    RollingGroupby

    See Also
    --------
    Series.groupby
    DataFrame.groupby
    """
    return RollingGroupby(self, self._groupkeys, window, min_periods=min_periods)
def expanding(self, min_periods=1):
    """
    Return an expanding grouper, providing expanding
    functionality per group.

    .. note:: 'min_periods' in Koalas works as a fixed window size unlike pandas.
        Unlike pandas, NA is also counted as the period. This might be changed
        in the near future.

    Parameters
    ----------
    min_periods : int, default 1
        Minimum number of observations in window required to have a value
        (otherwise result is NA).

    Returns
    -------
    ExpandingGroupby

    See Also
    --------
    Series.groupby
    DataFrame.groupby
    """
    return ExpandingGroupby(self, self._groupkeys, min_periods=min_periods)
def _reduce_for_stat_function(self, sfun, only_numeric):
    """Apply an aggregate Spark function per group and rebuild a Koalas frame.

    Parameters
    ----------
    sfun : callable
        Takes a Spark Column and returns an aggregated Spark Column
        (e.g. ``F.min``, ``F.countDistinct``).
    only_numeric : bool
        If True, non-numeric aggregation columns are silently skipped.

    Returns
    -------
    DataFrame
        Group keys become the index unless ``as_index`` is False.
    """
    # Alias group keys to internal index column names so they survive the agg.
    groupkey_cols = [
        s.alias(SPARK_INDEX_NAME_FORMAT(i)) for i, s in enumerate(self._groupkeys_scols)
    ]
    sdf = self._kdf._sdf

    data_columns = []
    column_labels = []
    if len(self._agg_columns) > 0:
        stat_exprs = []
        for kser, c in zip(self._agg_columns, self._agg_columns_scols):
            spark_type = kser.spark_type
            name = kser._internal.data_spark_column_names[0]
            label = kser._internal.column_labels[0]
            # TODO: we should have a function that takes dataframes and converts the numeric
            # types. Converting the NaNs is used in a few places, it should be in utils.
            # Special handle floating point types because Spark's count treats nan as a valid
            # value, whereas Pandas count doesn't include nan.
            if isinstance(spark_type, DoubleType) or isinstance(spark_type, FloatType):
                stat_exprs.append(sfun(F.nanvl(c, F.lit(None))).alias(name))
                data_columns.append(name)
                column_labels.append(label)
            elif isinstance(spark_type, NumericType) or not only_numeric:
                stat_exprs.append(sfun(c).alias(name))
                data_columns.append(name)
                column_labels.append(label)
        sdf = sdf.groupby(*groupkey_cols).agg(*stat_exprs)
    else:
        # No aggregation columns: the result is just the distinct group keys.
        sdf = sdf.select(*groupkey_cols).distinct()

    internal = _InternalFrame(
        spark_frame=sdf,
        index_map=OrderedDict(
            (SPARK_INDEX_NAME_FORMAT(i), s._internal.column_labels[0])
            for i, s in enumerate(self._groupkeys)
        ),
        column_labels=column_labels,
        data_spark_columns=[scol_for(sdf, col) for col in data_columns],
        column_label_names=self._kdf._internal.column_label_names,
    )
    kdf = DataFrame(internal)
    if not self._as_index:
        # Demote group keys from the index back to regular columns.
        kdf = kdf.reset_index(drop=self._should_drop_index)
    return kdf
class DataFrameGroupBy(GroupBy):

    """GroupBy object backed by a whole DataFrame (multiple aggregation columns)."""

    def __init__(
        self,
        kdf: DataFrame,
        by: List[Series],
        as_index: bool = True,
        should_drop_index: bool = False,
        agg_columns: List[Union[str, Tuple[str, ...]]] = None,
    ):
        # kdf: the Koalas DataFrame being grouped.
        # by: resolved grouping-key Series.
        # as_index: when True, group keys become the index of results.
        # should_drop_index: drop the original index on reset_index()
        #     when as_index is False.
        # agg_columns: explicit labels of columns to aggregate; when None,
        #     every column that is not a grouping key is used.
        self._kdf = kdf
        self._groupkeys = by
        self._groupkeys_scols = [s._scol for s in self._groupkeys]
        self._as_index = as_index
        self._should_drop_index = should_drop_index
        self._have_agg_columns = True

        if agg_columns is None:
            # Default: aggregate every column that is not a grouping key.
            agg_columns = [
                label
                for label in self._kdf._internal.column_labels
                if all(not self._kdf[label]._equals(key) for key in self._groupkeys)
            ]
            self._have_agg_columns = False
        self._agg_columns = [kdf[label] for label in agg_columns]
        self._agg_columns_scols = [s._scol for s in self._agg_columns]

    def __getattr__(self, item: str) -> Any:
        # Surface pandas-parity "missing API" shims first so users get a
        # descriptive error; otherwise fall back to column selection.
        if hasattr(_MissingPandasLikeDataFrameGroupBy, item):
            property_or_func = getattr(_MissingPandasLikeDataFrameGroupBy, item)
            if isinstance(property_or_func, property):
                return property_or_func.fget(self)  # type: ignore
            else:
                return partial(property_or_func, self)
        return self.__getitem__(item)

    def __getitem__(self, item):
        # A single column name (with as_index) yields a SeriesGroupBy;
        # anything else restricts this DataFrameGroupBy to the given columns.
        if isinstance(item, str) and self._as_index:
            return SeriesGroupBy(self._kdf[item], self._groupkeys)
        else:
            if isinstance(item, str):
                item = [item]
            # Normalize labels to tuples to support multi-level columns.
            item = [i if isinstance(i, tuple) else (i,) for i in item]
            if not self._as_index:
                # Group keys will be materialized as columns later; forbid
                # selecting a column that would collide with them.
                groupkey_names = set(key.name for key in self._groupkeys)
                for i in item:
                    name = str(i) if len(i) > 1 else i[0]
                    if name in groupkey_names:
                        raise ValueError("cannot insert {}, already exists".format(name))
            return DataFrameGroupBy(
                self._kdf,
                self._groupkeys,
                as_index=self._as_index,
                agg_columns=item,
                should_drop_index=self._should_drop_index,
            )

    def _apply_series_op(self, op):
        # Apply `op` to each aggregation column grouped by the same keys and
        # reassemble the per-column results into one DataFrame.
        applied = []
        for column in self._agg_columns:
            applied.append(op(column.groupby(self._groupkeys)))
        internal = self._kdf._internal.with_new_columns(applied, keep_order=False)
        return DataFrame(internal)

    def _fillna(self, *args, **kwargs):
        # Fill NA per group for every column that is not a grouping key.
        applied = []
        kdf = self._kdf
        for label in kdf._internal.column_labels:
            if all(not self._kdf[label]._equals(key) for key in self._groupkeys):
                applied.append(kdf[label].groupby(self._groupkeys)._fillna(*args, **kwargs))
        internal = kdf._internal.with_new_columns(applied, keep_order=False)
        return DataFrame(internal)

    # TODO: Implement 'percentiles', 'include', and 'exclude' arguments.
    # TODO: Add ``DataFrame.select_dtypes`` to See Also when 'include'
    # and 'exclude' arguments are implemented.
    def describe(self):
        """
        Generate descriptive statistics that summarize the central tendency,
        dispersion and shape of a dataset's distribution, excluding
        ``NaN`` values.

        Analyzes both numeric and object series, as well
        as ``DataFrame`` column sets of mixed data types. The output
        will vary depending on what is provided. Refer to the notes
        below for more detail.

        .. note:: Unlike pandas, the percentiles in Koalas are based upon
            approximate percentile computation because computing percentiles
            across a large dataset is extremely expensive.

        Returns
        -------
        DataFrame
            Summary statistics of the DataFrame provided.

        See Also
        --------
        DataFrame.count
        DataFrame.max
        DataFrame.min
        DataFrame.mean
        DataFrame.std

        Examples
        --------
        >>> df = ks.DataFrame({'a': [1, 1, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]})
        >>> df
           a  b  c
        0  1  4  7
        1  1  5  8
        2  3  6  9

        Describing a ``DataFrame``. By default only numeric fields
        are returned.

        >>> described = df.groupby('a').describe()
        >>> described.sort_index()  # doctest: +NORMALIZE_WHITESPACE
              b                                        c
          count mean       std  min  25%  50%  75%  max count mean       std  min  25%  50%  75%  max
        a
        1   2.0  4.5  0.707107  4.0  4.0  4.0  5.0  5.0   2.0  7.5  0.707107  7.0  7.0  7.0  8.0  8.0
        3   1.0  6.0       NaN  6.0  6.0  6.0  6.0  6.0   1.0  9.0       NaN  9.0  9.0  9.0  9.0  9.0
        """
        for col in self._agg_columns:
            if isinstance(col.spark_type, StringType):
                raise NotImplementedError(
                    "DataFrameGroupBy.describe() doesn't support for string type for now"
                )

        kdf = self.agg(["count", "mean", "std", "min", "quartiles", "max"]).reset_index()
        sdf = kdf._sdf
        agg_cols = [col.name for col in self._agg_columns]
        formatted_percentiles = ["25%", "50%", "75%"]

        # Split "quartiles" columns into first, second, and third quartiles.
        for col in agg_cols:
            quartiles_col = str((col, "quartiles"))
            for i, percentile in enumerate(formatted_percentiles):
                sdf = sdf.withColumn(str((col, percentile)), F.col(quartiles_col)[i])
            sdf = sdf.drop(quartiles_col)

        # Reorder columns lexicographically by agg column followed by stats.
        stats = ["count", "mean", "std", "min"] + formatted_percentiles + ["max"]
        column_labels = list(product(agg_cols, stats))
        data_columns = map(str, column_labels)

        # Reindex the DataFrame to reflect initial grouping and agg columns.
        internal = _InternalFrame(
            spark_frame=sdf,
            index_map=OrderedDict(
                (s._internal.data_spark_column_names[0], s._internal.column_labels[0])
                for s in self._groupkeys
            ),
            column_labels=column_labels,
            data_spark_columns=[scol_for(sdf, col) for col in data_columns],
        )

        # Cast columns to ``"float64"`` to match `pandas.DataFrame.groupby`.
        return DataFrame(internal).astype("float64")
class SeriesGroupBy(GroupBy):

    """GroupBy object backed by a single Series."""

    def __init__(self, kser: Series, by: List[Series], as_index: bool = True):
        # kser: the Series being grouped; by: resolved grouping-key Series.
        self._kser = kser
        self._groupkeys = by
        # TODO: this class resolves the groupkeys and agg_columns always by columns names
        # e.g., F.col("..."). This is because of the limitation of `SeriesGroupBy`
        # implementation, which reuses the implementation in `GroupBy`.
        # `SeriesGroupBy` creates another DataFrame and
        # internal IDs of the columns become different. Maybe we should refactor the whole
        # class in the future.
        self._groupkeys_scols = [
            F.col(s._internal.data_spark_column_names[0]) for s in self._groupkeys
        ]
        self._agg_columns_scols = [
            F.col(s._internal.data_spark_column_names[0]) for s in self._agg_columns
        ]

        if not as_index:
            raise TypeError("as_index=False only valid with DataFrame")
        self._as_index = True
        self._have_agg_columns = True

        # Not used currently. It's a placeholder to match with DataFrameGroupBy.
        self._should_drop_index = False

    def __getattr__(self, item: str) -> Any:
        # Route unknown attributes to the "missing pandas API" shims so users
        # get a descriptive error; otherwise raise AttributeError as usual.
        if hasattr(_MissingPandasLikeSeriesGroupBy, item):
            property_or_func = getattr(_MissingPandasLikeSeriesGroupBy, item)
            if isinstance(property_or_func, property):
                return property_or_func.fget(self)  # type: ignore
            else:
                return partial(property_or_func, self)
        raise AttributeError(item)

    def _apply_series_op(self, op):
        # A SeriesGroupBy is already per-series; just apply op to itself.
        return op(self)

    def _fillna(self, *args, **kwargs):
        # Fill within each group by partitioning on the grouping key columns.
        return Series._fillna(self._kser, *args, **kwargs, part_cols=self._groupkeys_scols)

    @property
    def _kdf(self) -> DataFrame:
        # TODO: Currently cannot handle the case when the values in current series
        # and groupkeys series are different but only their names are same.
        series = [self._kser] + [s for s in self._groupkeys if not s._equals(self._kser)]
        return DataFrame(self._kser._kdf._internal.with_new_columns(series))

    @property
    def _agg_columns(self):
        # The only aggregation column is the wrapped Series itself.
        return [self._kser]

    def _reduce_for_stat_function(self, sfun, only_numeric):
        # Reuse the DataFrame implementation and unwrap the single column.
        return _col(super(SeriesGroupBy, self)._reduce_for_stat_function(sfun, only_numeric))

    def agg(self, *args, **kwargs):
        # Not implemented for SeriesGroupBy; delegates to the missing-API shim.
        return _MissingPandasLikeSeriesGroupBy.agg(self, *args, **kwargs)

    def aggregate(self, *args, **kwargs):
        # Not implemented for SeriesGroupBy; delegates to the missing-API shim.
        return _MissingPandasLikeSeriesGroupBy.aggregate(self, *args, **kwargs)

    def transform(self, func):
        return _col(super(SeriesGroupBy, self).transform(func))

    transform.__doc__ = GroupBy.transform.__doc__

    def filter(self, *args, **kwargs):
        # Not implemented for SeriesGroupBy; delegates to the missing-API shim.
        return _MissingPandasLikeSeriesGroupBy.filter(self, *args, **kwargs)

    def idxmin(self, skipna=True):
        return _col(super(SeriesGroupBy, self).idxmin(skipna))

    idxmin.__doc__ = GroupBy.idxmin.__doc__

    def idxmax(self, skipna=True):
        return _col(super(SeriesGroupBy, self).idxmax(skipna))

    idxmax.__doc__ = GroupBy.idxmax.__doc__

    def head(self, n=5):
        return _col(super(SeriesGroupBy, self).head(n))

    # TODO: add keep parameter
    def nsmallest(self, n=5):
        """
        Return the first n rows ordered by columns in ascending order in group.

        Return the first n rows with the smallest values in columns, in ascending order.
        The columns that are not specified are returned as well, but not used for ordering.

        Parameters
        ----------
        n : int
            Number of items to retrieve.

        See Also
        --------
        Series.nsmallest
        DataFrame.nsmallest
        databricks.koalas.Series.nsmallest

        Examples
        --------
        >>> df = ks.DataFrame({'a': [1, 1, 1, 2, 2, 2, 3, 3, 3],
        ...                    'b': [1, 2, 2, 2, 3, 3, 3, 4, 4]}, columns=['a', 'b'])
        >>> df.groupby(['a'])['b'].nsmallest(1).sort_index()  # doctest: +NORMALIZE_WHITESPACE
        a
        1  0    1
        2  3    2
        3  6    3
        Name: b, dtype: int64
        """
        if len(self._kdf._internal.index_names) > 1:
            raise ValueError("nsmallest do not support multi-index now")

        sdf = self._kdf._sdf
        name = self._agg_columns[0]._internal.data_spark_column_names[0]
        # Rank rows per group by ascending value, then keep the top n.
        window = Window.partitionBy(self._groupkeys_scols).orderBy(
            scol_for(sdf, name), NATURAL_ORDER_COLUMN_NAME
        )
        sdf = sdf.withColumn("rank", F.row_number().over(window)).filter(F.col("rank") <= n)

        internal = _InternalFrame(
            spark_frame=sdf.drop(NATURAL_ORDER_COLUMN_NAME),
            index_map=OrderedDict(
                [
                    (s._internal.data_spark_column_names[0], s._internal.column_labels[0])
                    for s in self._groupkeys
                ]
                + list(self._kdf._internal.index_map.items())
            ),
            data_spark_columns=[scol_for(sdf, name)],
        )
        return _col(DataFrame(internal))

    # TODO: add keep parameter
    def nlargest(self, n=5):
        """
        Return the first n rows ordered by columns in descending order in group.

        Return the first n rows with the smallest values in columns, in descending order.
        The columns that are not specified are returned as well, but not used for ordering.

        Parameters
        ----------
        n : int
            Number of items to retrieve.

        See Also
        --------
        Series.nlargest
        DataFrame.nlargest
        databricks.koalas.Series.nlargest

        Examples
        --------
        >>> df = ks.DataFrame({'a': [1, 1, 1, 2, 2, 2, 3, 3, 3],
        ...                    'b': [1, 2, 2, 2, 3, 3, 3, 4, 4]}, columns=['a', 'b'])
        >>> df.groupby(['a'])['b'].nlargest(1).sort_index()  # doctest: +NORMALIZE_WHITESPACE
        a
        1  1    2
        2  4    3
        3  7    4
        Name: b, dtype: int64
        """
        if len(self._kdf._internal.index_names) > 1:
            raise ValueError("nlargest do not support multi-index now")

        sdf = self._kdf._sdf
        name = self._agg_columns[0]._internal.data_spark_column_names[0]
        # Rank rows per group by descending value, then keep the top n.
        window = Window.partitionBy(self._groupkeys_scols).orderBy(
            F.col(name).desc(), NATURAL_ORDER_COLUMN_NAME
        )
        sdf = sdf.withColumn("rank", F.row_number().over(window)).filter(F.col("rank") <= n)

        internal = _InternalFrame(
            spark_frame=sdf.drop(NATURAL_ORDER_COLUMN_NAME),
            index_map=OrderedDict(
                [
                    (s._internal.data_spark_column_names[0], s._internal.column_labels[0])
                    for s in self._groupkeys
                ]
                + list(self._kdf._internal.index_map.items())
            ),
            data_spark_columns=[scol_for(sdf, name)],
        )
        return _col(DataFrame(internal))

    # TODO: add bins, normalize parameter
    def value_counts(self, sort=None, ascending=None, dropna=True):
        """
        Compute group sizes.

        Parameters
        ----------
        sort : boolean, default None
            Sort by frequencies.
        ascending : boolean, default False
            Sort in ascending order.
        dropna : boolean, default True
            Don't include counts of NaN.

        See Also
        --------
        databricks.koalas.Series.groupby
        databricks.koalas.DataFrame.groupby

        Examples
        --------
        >>> df = ks.DataFrame({'A': [1, 2, 2, 3, 3, 3],
        ...                    'B': [1, 1, 2, 3, 3, 3]},
        ...                   columns=['A', 'B'])
        >>> df
           A  B
        0  1  1
        1  2  1
        2  2  2
        3  3  3
        4  3  3
        5  3  3

        >>> df.groupby('A')['B'].value_counts().sort_index()  # doctest: +NORMALIZE_WHITESPACE
        A  B
        1  1    1
        2  1    1
           2    1
        3  3    3
        Name: B, dtype: int64
        """
        groupkeys = self._groupkeys + self._agg_columns
        groupkey_cols = [
            s.alias(SPARK_INDEX_NAME_FORMAT(i))
            for i, s in enumerate(self._groupkeys_scols + self._agg_columns_scols)
        ]
        sdf = self._kdf._sdf
        agg_column = self._agg_columns[0]._internal.data_spark_column_names[0]
        # Group by (keys, value) pairs and count occurrences; reuse the value
        # column's name for the count so the result keeps the Series name.
        sdf = sdf.groupby(*groupkey_cols).count().withColumnRenamed("count", agg_column)

        if sort:
            if ascending:
                sdf = sdf.orderBy(F.col(agg_column).asc())
            else:
                sdf = sdf.orderBy(F.col(agg_column).desc())

        internal = _InternalFrame(
            spark_frame=sdf,
            index_map=OrderedDict(
                (SPARK_INDEX_NAME_FORMAT(i), s._internal.column_labels[0])
                for i, s in enumerate(groupkeys)
            ),
            data_spark_columns=[scol_for(sdf, agg_column)],
        )
        return _col(DataFrame(internal))
def _is_multi_agg_with_relabel(**kwargs):
"""
Check whether the kwargs pass to .agg look like multi-agg with relabling.
Parameters
----------
**kwargs : dict
Returns
-------
bool
Examples
--------
>>> _is_multi_agg_with_relabel(a='max')
False
>>> _is_multi_agg_with_relabel(a_max=('a', 'max'),
... a_min=('a', 'min'))
True
>>> _is_multi_agg_with_relabel()
False
"""
if not kwargs:
return False
return all(isinstance(v, tuple) and len(v) == 2 for v in kwargs.values())
def _normalize_keyword_aggregation(kwargs):
"""
Normalize user-provided kwargs.
Transforms from the new ``Dict[str, NamedAgg]`` style kwargs
to the old OrderedDict[str, List[scalar]]].
Parameters
----------
kwargs : dict
Returns
-------
aggspec : dict
The transformed kwargs.
columns : List[str]
The user-provided keys.
order : List[Tuple[str, str]]
Pairs of the input and output column names.
Examples
--------
>>> _normalize_keyword_aggregation({'output': ('input', 'sum')})
(OrderedDict([('input', ['sum'])]), ('output',), [('input', 'sum')])
"""
# this is due to python version issue, not sure the impact on koalas
PY36 = sys.version_info >= (3, 6)
if not PY36:
kwargs = OrderedDict(sorted(kwargs.items()))
# TODO(Py35): When we drop python 3.5, change this to defaultdict(list)
aggspec = OrderedDict()
order = []
columns, pairs = list(zip(*kwargs.items()))
for column, aggfunc in pairs:
if column in aggspec:
aggspec[column].append(aggfunc)
else:
aggspec[column] = [aggfunc]
order.append((column, aggfunc))
return aggspec, columns, order
| 1 | 14,773 | Can you show before/after output in the PR description? From a cursory look, seems like it will just drop the duplicated column which isn't consistent with pandas' results. | databricks-koalas | py |
@@ -33,6 +33,14 @@ module Bolt
@rest = data.reject { |k, _| %w[name nodes config groups].include? k }
end
+ def check_deprecated_config(context, name, config)
+ if config && config['transports']
+ msg = "#{context} #{name} contains invalid config option 'transports', see " \
+ "https://puppet.com/docs/bolt/0.x/inventory_file.html for the updated format"
+ raise ValidationError.new(msg, @name)
+ end
+ end
+
def validate(used_names = Set.new, node_names = Set.new, depth = 0)
raise ValidationError.new("Group does not have a name", nil) unless @name
if used_names.include?(@name) | 1 | module Bolt
class Inventory
# Group is a specific implementation of Inventory based on nested
# structured data.
class Group
attr_accessor :name, :nodes, :groups, :config, :rest
# Builds a Group from a parsed inventory data hash.
#
# data - Hash with optional keys 'name', 'nodes', 'vars', 'config', 'groups'.
#        Any other keys are kept aside in @rest.
def initialize(data)
  @logger = Logging.logger[self]
  @name = data['name']
  @nodes = {}
  if data['nodes']
    data['nodes'].each do |n|
      # A bare string entry is shorthand for { 'name' => <string> }.
      n = { 'name' => n } if n.is_a? String
      if @nodes.include? n['name']
        # First definition wins; later duplicates are logged and dropped.
        @logger.warn("Ignoring duplicate node in #{@name}: #{n}")
      else
        @nodes[n['name']] = n
      end
    end
  end
  @vars = data['vars'] || {}
  @config = data['config'] || {}
  # Subgroups are built recursively from their own data hashes.
  @groups = if data['groups']
              data['groups'].map { |g| Group.new(g) }
            else
              []
            end
  # this allows arbitrary info for the top level
  @rest = data.reject { |k, _| %w[name nodes config groups].include? k }
end
# Recursively validates this group and its subgroups.
#
# used_names - Set of group names already seen (group names must be unique
#              and must not collide with node names).
# node_names - Set of node names already seen.
# depth      - Current nesting depth; nesting deeper than 2 levels is rejected.
#
# Raises ValidationError on any violation; returns nil on success.
def validate(used_names = Set.new, node_names = Set.new, depth = 0)
  raise ValidationError.new("Group does not have a name", nil) unless @name
  if used_names.include?(@name)
    raise ValidationError.new("Tried to redefine group #{@name}", @name)
  end
  raise ValidationError.new("Invalid Group name #{@name}", @name) unless @name =~ /\A[a-z0-9_]+\Z/

  if node_names.include?(@name)
    raise ValidationError.new("Group #{@name} conflicts with node of the same name", @name)
  end
  raise ValidationError.new("Group #{@name} is too deeply nested", @name) if depth > 1

  used_names << @name

  @nodes.each_value do |n|
    # Require nodes to be parseable as a Target.
    begin
      Target.new(n['name'])
    rescue Addressable::URI::InvalidURIError => e
      @logger.debug(e)
      raise ValidationError.new("Invalid node name #{n['name']}", n['name'])
    end

    raise ValidationError.new("Node #{n['name']} does not have a name", n['name']) unless n['name']
    if used_names.include?(n['name'])
      raise ValidationError.new("Group #{n['name']} conflicts with node of the same name", n['name'])
    end

    node_names << n['name']
  end

  @groups.each do |g|
    begin
      g.validate(used_names, node_names, depth + 1)
    rescue ValidationError => e
      # Prefix the failing subgroup's ancestry so the error is locatable.
      e.add_parent(@name)
      raise e
    end
  end

  nil
end
# The data functions below expect and return nil or a hash of the schema
# { 'config' => Hash , 'vars' => Hash, groups => Array }
# As we add more options beyond config this schema will grow
# Resolved data for a node: group-level data merged over node-level data.
def data_for(node_name)
  data_merge(group_collect(node_name), node_collect(node_name))
end
# Data declared directly on a node in this group, in the standard schema,
# or nil when the node is not defined here.
def node_data(node_name)
  if (data = @nodes[node_name])
    { 'config' => data['config'] || {},
      'vars' => data['vars'] || {},
      # groups come from group_data
      'groups' => [] }
  end
end
# This group's own contribution to a node's resolved data.
def group_data
  { 'config' => @config,
    'vars' => @vars,
    'groups' => [@name] }
end
# Identity value for data_merge: no config, no vars, no group membership.
def empty_data
  { 'config' => {}, 'vars' => {}, 'groups' => [] }
end
# Merges two data hashes of the standard schema, with data1 taking
# precedence. If either is nil, the other is returned unchanged.
def data_merge(data1, data2)
  if data2.nil? || data1.nil?
    return data2 || data1
  end

  {
    'config' => Bolt::Util.deep_merge(data1['config'], data2['config']),
    # Shallow merge instead of deep merge so that vars with a hash value
    # are assigned a new hash, rather than merging the existing value
    # with the value meant to replace it
    'vars' => data2['vars'].merge(data1['vars']),
    'groups' => data2['groups'] + data1['groups']
  }
end
# Returns all nodes contained within the group, which includes nodes from subgroups.
# Returns all nodes contained within the group, which includes nodes
# from subgroups, as a Set of names.
def node_names
  @groups.inject(local_node_names) do |acc, g|
    acc.merge(g.node_names)
  end
end
# Return a mapping of group names to group.
# Return a mapping of group names to group objects, including this group
# and all of its subgroups.
def collect_groups
  @groups.inject(name => self) do |acc, g|
    acc.merge(g.collect_groups)
  end
end
# Names of the nodes defined directly in this group (subgroups excluded).
def local_node_names
  @nodes.keys.to_set
end
private :local_node_names
# Collects node-level data for node_name from this group and all subgroups,
# with deeper definitions merged underneath this group's own node entry.
# Returns nil if the node is defined nowhere in the subtree.
def node_collect(node_name)
  data = @groups.inject(nil) do |acc, g|
    if (d = g.node_collect(node_name))
      data_merge(d, acc)
    else
      acc
    end
  end
  data_merge(node_data(node_name), data)
end
# Collects group-level data applying to node_name from this subtree.
# Returns nil unless the node belongs to this group or a subgroup; the
# group's own data takes precedence over subgroup data.
def group_collect(node_name)
  data = @groups.inject(nil) do |acc, g|
    if (d = g.data_for(node_name))
      data_merge(d, acc)
    else
      acc
    end
  end

  if data
    data_merge(group_data, data)
  elsif @nodes.include?(node_name)
    # Node is defined directly here: only this group's data applies.
    group_data
  end
end
end
end
end
| 1 | 8,045 | Added validation that `config: transports` isn't used @adreyer @katelopresti Curious what you think about having Bolt link directly to the docs site. | puppetlabs-bolt | rb |
@@ -99,6 +99,7 @@ class AbstractSettings:
_FONT_SIZES = {} # type: typing.Dict[str, typing.Any]
_FONT_FAMILIES = {} # type: typing.Dict[str, typing.Any]
_FONT_TO_QFONT = {} # type: typing.Dict[typing.Any, QFont.StyleHint]
+ _UnknownUrlSchemePolicy = {} # type: typing.Dict[str, typing.Any]
def __init__(self, settings: typing.Any) -> None:
self._settings = settings | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2020 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Bridge from QWeb(Engine)Settings to our own settings."""
import re
import typing
import argparse
import functools
import attr
from PyQt5.QtCore import QUrl, pyqtSlot, qVersion
from PyQt5.QtGui import QFont
import qutebrowser
from qutebrowser.config import config
from qutebrowser.utils import log, usertypes, urlmatch, qtutils
from qutebrowser.misc import objects, debugcachestats
UNSET = object()
@attr.s
class UserAgent:

    """A parsed user agent."""

    # Contents of the first parenthesised comment, e.g. "X11; Linux x86_64".
    os_info = attr.ib()  # type: str
    # Version following "AppleWebKit/".
    webkit_version = attr.ib()  # type: str
    # Product key identifying the upstream browser ("Chrome" or "Version").
    upstream_browser_key = attr.ib()  # type: str
    # Version associated with upstream_browser_key.
    upstream_browser_version = attr.ib()  # type: str
    # Product key for the Qt backend ("QtWebEngine" or "Qt").
    qt_key = attr.ib()  # type: str

    @classmethod
    def parse(cls, ua: str) -> 'UserAgent':
        """Parse a user agent string into its components.

        Raises ValueError if neither a "Chrome" nor a "Version" product
        token is present.
        """
        # First "(...)" comment carries the OS information.
        comment_matches = re.finditer(r'\(([^)]*)\)', ua)
        os_info = list(comment_matches)[0].group(1)

        # Collect all "product/version" tokens.
        version_matches = re.finditer(r'(\S+)/(\S+)', ua)
        versions = {}
        for match in version_matches:
            versions[match.group(1)] = match.group(2)

        webkit_version = versions['AppleWebKit']

        if 'Chrome' in versions:
            upstream_browser_key = 'Chrome'
            qt_key = 'QtWebEngine'
        elif 'Version' in versions:
            upstream_browser_key = 'Version'
            qt_key = 'Qt'
        else:
            raise ValueError("Invalid upstream browser key: {}".format(ua))

        upstream_browser_version = versions[upstream_browser_key]

        return cls(os_info=os_info,
                   webkit_version=webkit_version,
                   upstream_browser_key=upstream_browser_key,
                   upstream_browser_version=upstream_browser_version,
                   qt_key=qt_key)
class AttributeInfo:

    """Info about a settings attribute.

    Holds the QWeb(Engine)Settings attribute(s) a config option maps to,
    and a converter turning the config value into the attribute value.
    """

    def __init__(self, *attributes: typing.Any,
                 converter: typing.Callable = None) -> None:
        self.attributes = attributes
        # Default to the identity function when no converter is supplied.
        self.converter = (lambda val: val) if converter is None else converter
class AbstractSettings:

    """Abstract base class for settings set via QWeb(Engine)Settings."""

    _ATTRIBUTES = {}  # type: typing.Dict[str, AttributeInfo]
    _FONT_SIZES = {}  # type: typing.Dict[str, typing.Any]
    _FONT_FAMILIES = {}  # type: typing.Dict[str, typing.Any]
    _FONT_TO_QFONT = {}  # type: typing.Dict[typing.Any, QFont.StyleHint]

    def __init__(self, settings: typing.Any) -> None:
        # The wrapped QWebSettings/QWebEngineSettings object.
        self._settings = settings

    def set_attribute(self, name: str, value: typing.Any) -> bool:
        """Set the given QWebSettings/QWebEngineSettings attribute.

        Passing usertypes.UNSET as value resets the attribute instead of
        setting it.

        Return:
            True if the effective value changed, False otherwise.
        """
        before = self.test_attribute(name)
        info = self._ATTRIBUTES[name]
        resetting = value is usertypes.UNSET
        for attribute in info.attributes:
            if resetting:
                self._settings.resetAttribute(attribute)
                after = self.test_attribute(name)
            else:
                self._settings.setAttribute(attribute, info.converter(value))
                after = value
        return after != before

    def test_attribute(self, name: str) -> bool:
        """Get the current value for the given attribute.

        When the setting maps to a list of attributes, only the first one is
        queried.
        """
        first_attribute = self._ATTRIBUTES[name].attributes[0]
        return self._settings.testAttribute(first_attribute)

    def set_font_size(self, name: str, value: int) -> bool:
        """Set the given QWebSettings/QWebEngineSettings font size.

        Return:
            True if the stored size changed, False otherwise.
        """
        assert value is not usertypes.UNSET  # type: ignore
        size_type = self._FONT_SIZES[name]
        before = self._settings.fontSize(size_type)
        self._settings.setFontSize(size_type, value)
        return before != value

    def set_font_family(self, name: str, value: typing.Optional[str]) -> bool:
        """Set the given QWebSettings/QWebEngineSettings font family.

        With None (the default), QFont is used to determine the platform's
        default font for the family.

        Return:
            True if the stored family changed, False otherwise.
        """
        assert value is not usertypes.UNSET  # type: ignore
        family = self._FONT_FAMILIES[name]
        if value is None:
            default_font = QFont()
            default_font.setStyleHint(self._FONT_TO_QFONT[family])
            value = default_font.defaultFamily()
        before = self._settings.fontFamily(family)
        self._settings.setFontFamily(family, value)
        return before != value

    def set_default_text_encoding(self, encoding: str) -> bool:
        """Set the default text encoding to use.

        Return:
            True if the encoding changed, False otherwise.
        """
        assert encoding is not usertypes.UNSET  # type: ignore
        before = self._settings.defaultTextEncoding()
        self._settings.setDefaultTextEncoding(encoding)
        return before != encoding

    def _update_setting(self, setting: str, value: typing.Any) -> bool:
        """Update the given setting/value pair, ignoring unknown settings.

        Return:
            True if there was a change, False otherwise.
        """
        if setting in self._ATTRIBUTES:
            return self.set_attribute(setting, value)
        if setting in self._FONT_SIZES:
            return self.set_font_size(setting, value)
        if setting in self._FONT_FAMILIES:
            return self.set_font_family(setting, value)
        if setting == 'content.default_encoding':
            return self.set_default_text_encoding(value)
        return False

    def update_setting(self, setting: str) -> None:
        """Update the given setting from its current config value."""
        self._update_setting(setting, config.instance.get(setting))

    def update_for_url(self, url: QUrl) -> typing.Set[str]:
        """Update settings customized for the given tab.

        Return:
            A set of setting names which actually changed.
        """
        qtutils.ensure_valid(url)
        changed = set()  # type: typing.Set[str]
        for values in config.instance:
            if not values.opt.supports_pattern:
                continue
            value = values.get_for_url(url, fallback=False)
            if self._update_setting(values.opt.name, value):
                log.config.debug("Changed for {}: {} = {}".format(
                    url.toDisplayString(), values.opt.name, value))
                changed.add(values.opt.name)
        return changed

    def init_settings(self) -> None:
        """Set all supported settings correctly."""
        for mapping in (self._ATTRIBUTES, self._FONT_SIZES,
                        self._FONT_FAMILIES):
            for setting in mapping:
                self.update_setting(setting)
@debugcachestats.register(name='user agent cache')
@functools.lru_cache()
def _format_user_agent(template: str, backend: usertypes.Backend) -> str:
    """Fill the given user agent template for the given backend.

    Results are memoized via lru_cache, keyed on (template, backend).
    """
    # Pick the parsed user agent from whichever backend is in use.
    if backend == usertypes.Backend.QtWebEngine:
        from qutebrowser.browser.webengine import webenginesettings
        parsed = webenginesettings.parsed_user_agent
    else:
        from qutebrowser.browser.webkit import webkitsettings
        parsed = webkitsettings.parsed_user_agent
    assert parsed is not None
    return template.format(
        qutebrowser_version=qutebrowser.__version__,
        os_info=parsed.os_info,
        webkit_version=parsed.webkit_version,
        upstream_browser_key=parsed.upstream_browser_key,
        upstream_browser_version=parsed.upstream_browser_version,
        qt_key=parsed.qt_key,
        qt_version=qVersion(),
    )
def user_agent(url: typing.Optional[QUrl] = None) -> str:
    """Get the formatted user agent string for the given URL.

    The template is read from the 'content.headers.user_agent' setting
    (per-URL overrides apply when url is given) and filled in for the
    currently active backend.
    """
    template = config.instance.get('content.headers.user_agent', url=url)
    return _format_user_agent(template=template, backend=objects.backend)
def init(args: argparse.Namespace) -> None:
    """Initialize all QWeb(Engine)Settings for the active backend."""
    if objects.backend == usertypes.Backend.QtWebEngine:
        from qutebrowser.browser.webengine import webenginesettings
        webenginesettings.init(args)
    else:
        from qutebrowser.browser.webkit import webkitsettings
        webkitsettings.init(args)
    # Make sure special URLs always get JS support
    for glob in ('chrome://*/*', 'qute://*/*'):
        config.instance.set_obj('content.javascript.enabled', True,
                                pattern=urlmatch.UrlPattern(glob),
                                hide_userconfig=True)
@pyqtSlot()
def shutdown() -> None:
    """Shut down QWeb(Engine)Settings for the active backend."""
    if objects.backend == usertypes.Backend.QtWebEngine:
        from qutebrowser.browser.webengine import webenginesettings as be_mod
    else:
        from qutebrowser.browser.webkit import webkitsettings as be_mod
    be_mod.shutdown()
| 1 | 24,331 | This probably isn't needed anymore now? | qutebrowser-qutebrowser | py |
@@ -3,9 +3,9 @@ import { h } from './h';
/**
* Clones the given VNode, optionally adding attributes/props and replacing its children.
- * @param {VNode} vnode The virtual DOM element to clone
- * @param {Object} props Attributes/props to add when cloning
- * @param {VNode} rest Any additional arguments will be used as replacement children.
+ * @param {import('./vnode').VNode} vnode The virtual DOM element to clone
+ * @param {object} props Attributes/props to add when cloning
+ * @param {Array<import('./vnode').VNode>} [rest] Any additional arguments will be used as replacement children.
*/
export function cloneElement(vnode, props) {
return h( | 1 | import { extend } from './util';
import { h } from './h';
/**
* Clones the given VNode, optionally adding attributes/props and replacing its children.
* @param {VNode} vnode The virtual DOM element to clone
* @param {Object} props Attributes/props to add when cloning
* @param {VNode} rest Any additional arguments will be used as replacement children.
*/
export function cloneElement(vnode, props) {
return h(
vnode.nodeName,
extend(extend({}, vnode.attributes), props),
arguments.length>2 ? [].slice.call(arguments, 2) : vnode.children
);
}
| 1 | 11,992 | Would it be easier to kick this import to the top of the file? It'll get stripped out anyway. | preactjs-preact | js |
@@ -236,7 +236,8 @@ public class MultiTenancyPrivacyController implements PrivacyController {
}
}
- private void verifyPrivacyGroupContainsEnclavePublicKey(
+ @Override
+ public void verifyPrivacyGroupContainsEnclavePublicKey(
final String privacyGroupId, final String enclavePublicKey) {
final PrivacyGroup privacyGroup = enclave.retrievePrivacyGroup(privacyGroupId);
if (!privacyGroup.getMembers().contains(enclavePublicKey)) { | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.privacy;
import org.hyperledger.besu.enclave.Enclave;
import org.hyperledger.besu.enclave.types.PrivacyGroup;
import org.hyperledger.besu.enclave.types.ReceiveResponse;
import org.hyperledger.besu.ethereum.core.Address;
import org.hyperledger.besu.ethereum.core.Hash;
import org.hyperledger.besu.ethereum.core.Transaction;
import org.hyperledger.besu.ethereum.mainnet.TransactionValidator.TransactionInvalidReason;
import org.hyperledger.besu.ethereum.mainnet.ValidationResult;
import org.hyperledger.besu.ethereum.transaction.CallParameter;
import java.math.BigInteger;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
public class MultiTenancyPrivacyController implements PrivacyController {
private final PrivacyController privacyController;
private final Enclave enclave;
private final PrivateTransactionValidator privateTransactionValidator;
public MultiTenancyPrivacyController(
final PrivacyController privacyController,
final Optional<BigInteger> chainId,
final Enclave enclave) {
this.privacyController = privacyController;
this.enclave = enclave;
privateTransactionValidator = new PrivateTransactionValidator(chainId);
}
@Override
public String sendTransaction(
final PrivateTransaction privateTransaction,
final String enclavePublicKey,
final Optional<PrivacyGroup> maybePrivacyGroup) {
verifyPrivateFromMatchesEnclavePublicKey(
privateTransaction.getPrivateFrom().toBase64String(), enclavePublicKey);
if (privateTransaction.getPrivacyGroupId().isPresent()) {
verifyPrivacyGroupContainsEnclavePublicKey(
privateTransaction.getPrivacyGroupId().get().toBase64String(), enclavePublicKey);
}
return privacyController.sendTransaction(
privateTransaction, enclavePublicKey, maybePrivacyGroup);
}
@Override
public ReceiveResponse retrieveTransaction(
final String enclaveKey, final String enclavePublicKey) {
// no validation necessary as the enclave receive only returns data for the enclave public key
return privacyController.retrieveTransaction(enclaveKey, enclavePublicKey);
}
@Override
public PrivacyGroup createPrivacyGroup(
final List<String> addresses,
final String name,
final String description,
final String enclavePublicKey) {
// no validation necessary as the enclave createPrivacyGroup fails if the addresses don't
// include the from (enclavePublicKey)
return privacyController.createPrivacyGroup(addresses, name, description, enclavePublicKey);
}
@Override
public String deletePrivacyGroup(final String privacyGroupId, final String enclavePublicKey) {
verifyPrivacyGroupContainsEnclavePublicKey(privacyGroupId, enclavePublicKey);
return privacyController.deletePrivacyGroup(privacyGroupId, enclavePublicKey);
}
@Override
public PrivacyGroup[] findPrivacyGroup(
final List<String> addresses, final String enclavePublicKey) {
if (!addresses.contains(enclavePublicKey)) {
throw new MultiTenancyValidationException(
"Privacy group addresses must contain the enclave public key");
}
final PrivacyGroup[] resultantGroups =
privacyController.findPrivacyGroup(addresses, enclavePublicKey);
return Arrays.stream(resultantGroups)
.filter(g -> g.getMembers().contains(enclavePublicKey))
.toArray(PrivacyGroup[]::new);
}
@Override
public Transaction createPrivacyMarkerTransaction(
final String transactionEnclaveKey, final PrivateTransaction privateTransaction) {
return privacyController.createPrivacyMarkerTransaction(
transactionEnclaveKey, privateTransaction);
}
@Override
public Transaction createPrivacyMarkerTransaction(
final String transactionEnclaveKey,
final PrivateTransaction privateTransaction,
final Address privacyPrecompileAddress) {
return privacyController.createPrivacyMarkerTransaction(
transactionEnclaveKey, privateTransaction, privacyPrecompileAddress);
}
@Override
public ValidationResult<TransactionInvalidReason> validatePrivateTransaction(
final PrivateTransaction privateTransaction, final String enclavePublicKey) {
final String privacyGroupId = privateTransaction.determinePrivacyGroupId().toBase64String();
verifyPrivacyGroupContainsEnclavePublicKey(privacyGroupId, enclavePublicKey);
return privateTransactionValidator.validate(
privateTransaction,
determineBesuNonce(privateTransaction.getSender(), privacyGroupId, enclavePublicKey),
true);
}
@Override
public long determineEeaNonce(
final String privateFrom,
final String[] privateFor,
final Address address,
final String enclavePublicKey) {
verifyPrivateFromMatchesEnclavePublicKey(privateFrom, enclavePublicKey);
return privacyController.determineEeaNonce(privateFrom, privateFor, address, enclavePublicKey);
}
@Override
public long determineBesuNonce(
final Address sender, final String privacyGroupId, final String enclavePublicKey) {
verifyPrivacyGroupContainsEnclavePublicKey(privacyGroupId, enclavePublicKey);
return privacyController.determineBesuNonce(sender, privacyGroupId, enclavePublicKey);
}
@Override
public Optional<PrivateTransactionProcessor.Result> simulatePrivateTransaction(
final String privacyGroupId,
final String enclavePublicKey,
final CallParameter callParams,
final long blockNumber) {
verifyPrivacyGroupContainsEnclavePublicKey(privacyGroupId, enclavePublicKey);
return privacyController.simulatePrivateTransaction(
privacyGroupId, enclavePublicKey, callParams, blockNumber);
}
@Override
public Optional<String> buildAndSendAddPayload(
final PrivateTransaction privateTransaction,
final Bytes32 privacyGroupId,
final String enclaveKey) {
verifyPrivateFromMatchesEnclavePublicKey(
privateTransaction.getPrivateFrom().toBase64String(), enclaveKey);
verifyPrivacyGroupContainsEnclavePublicKey(
privateTransaction.getPrivacyGroupId().get().toBase64String(), enclaveKey);
return privacyController.buildAndSendAddPayload(privateTransaction, privacyGroupId, enclaveKey);
}
@Override
public Optional<PrivacyGroup> retrieveOffChainPrivacyGroup(
final String privacyGroupId, final String enclavePublicKey) {
final Optional<PrivacyGroup> maybePrivacyGroup =
privacyController.retrieveOffChainPrivacyGroup(privacyGroupId, enclavePublicKey);
if (!maybePrivacyGroup.get().getMembers().contains(enclavePublicKey)) {
throw new MultiTenancyValidationException(
"Privacy group must contain the enclave public key");
}
return maybePrivacyGroup;
}
@Override
public List<PrivacyGroup> findOnChainPrivacyGroup(
final List<String> addresses, final String enclavePublicKey) {
if (!addresses.contains(enclavePublicKey)) {
throw new MultiTenancyValidationException(
"Privacy group addresses must contain the enclave public key");
}
final List<PrivacyGroup> resultantGroups =
privacyController.findOnChainPrivacyGroup(addresses, enclavePublicKey);
return resultantGroups.stream()
.filter(g -> g.getMembers().contains(enclavePublicKey))
.collect(Collectors.toList());
}
@Override
public List<PrivateTransactionWithMetadata> retrieveAddBlob(final String addDataKey) {
return privacyController.retrieveAddBlob(addDataKey);
}
@Override
public boolean isGroupAdditionTransaction(final PrivateTransaction privateTransaction) {
return privacyController.isGroupAdditionTransaction(privateTransaction);
}
@Override
public Optional<Bytes> getContractCode(
final String privacyGroupId,
final Address contractAddress,
final Hash blockHash,
final String enclavePublicKey) {
verifyPrivacyGroupContainsEnclavePublicKey(privacyGroupId, enclavePublicKey);
return privacyController.getContractCode(
privacyGroupId, contractAddress, blockHash, enclavePublicKey);
}
@Override
public Optional<PrivacyGroup> retrieveOnChainPrivacyGroup(
final Bytes privacyGroupId, final String enclavePublicKey) {
final Optional<PrivacyGroup> maybePrivacyGroup =
privacyController.retrieveOnChainPrivacyGroup(privacyGroupId, enclavePublicKey);
if (maybePrivacyGroup.isPresent()
&& !maybePrivacyGroup.get().getMembers().contains(enclavePublicKey)) {
throw new MultiTenancyValidationException(
"Privacy group must contain the enclave public key");
}
return maybePrivacyGroup;
}
private void verifyPrivateFromMatchesEnclavePublicKey(
final String privateFrom, final String enclavePublicKey) {
if (!privateFrom.equals(enclavePublicKey)) {
throw new MultiTenancyValidationException(
"Transaction privateFrom must match enclave public key");
}
}
private void verifyPrivacyGroupContainsEnclavePublicKey(
final String privacyGroupId, final String enclavePublicKey) {
final PrivacyGroup privacyGroup = enclave.retrievePrivacyGroup(privacyGroupId);
if (!privacyGroup.getMembers().contains(enclavePublicKey)) {
throw new MultiTenancyValidationException(
"Privacy group must contain the enclave public key");
}
}
}
| 1 | 22,141 | are you sure that the privacyGroup cannot be null? | hyperledger-besu | java |
@@ -120,7 +120,7 @@ def GenerateDepictionMatching2DStructure(mol, reference, confId=-1, referencePat
if not referenceMatch:
raise ValueError("Reference does not map to itself")
else:
- referenceMatch = range(reference.GetNumAtoms(onlyExplicit=True))
+ referenceMatch = list(range(reference.GetNumAtoms(onlyExplicit=True)))
if referencePattern:
match = mol.GetSubstructMatch(referencePattern)
else: | 1 | # $Id$
#
# Copyright (C) 2006-2011 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" Import all RDKit chemistry modules
"""
from rdkit import rdBase
from rdkit import RDConfig
from rdkit import DataStructs
from rdkit.Geometry import rdGeometry
from rdkit.Chem import *
from rdkit.Chem.rdPartialCharges import *
from rdkit.Chem.rdDepictor import *
from rdkit.Chem.rdForceFieldHelpers import *
from rdkit.Chem.ChemicalFeatures import *
from rdkit.Chem.rdDistGeom import *
from rdkit.Chem.rdMolAlign import *
from rdkit.Chem.rdMolTransforms import *
from rdkit.Chem.rdShapeHelpers import *
from rdkit.Chem.rdChemReactions import *
from rdkit.Chem.rdReducedGraphs import *
try:
from rdkit.Chem.rdSLNParse import *
except ImportError:
pass
from rdkit.Chem.rdMolDescriptors import *
from rdkit.Chem.rdqueries import *
from rdkit import ForceField
Mol.Compute2DCoords = Compute2DCoords
Mol.ComputeGasteigerCharges = ComputeGasteigerCharges
import numpy, os
from rdkit.RDLogger import logger
logger = logger()
import warnings
def TransformMol(mol, tform, confId=-1, keepConfs=False):
""" Applies the transformation (usually a 4x4 double matrix) to a molecule
if keepConfs is False then all but that conformer are removed
"""
refConf = mol.GetConformer(confId)
TransformConformer(refConf, tform)
if not keepConfs:
if confId == -1:
confId = 0
allConfIds = [c.GetId() for c in mol.GetConformers()]
for id in allConfIds:
if not id == confId:
mol.RemoveConformer(id)
#reset the conf Id to zero since there is only one conformer left
mol.GetConformer(confId).SetId(0)
def ComputeMolShape(mol, confId=-1, boxDim=(20, 20, 20), spacing=0.5, **kwargs):
""" returns a grid representation of the molecule's shape
"""
res = rdGeometry.UniformGrid3D(boxDim[0], boxDim[1], boxDim[2], spacing=spacing)
EncodeShape(mol, res, confId, **kwargs)
return res
def ComputeMolVolume(mol, confId=-1, gridSpacing=0.2, boxMargin=2.0):
""" Calculates the volume of a particular conformer of a molecule
based on a grid-encoding of the molecular shape.
"""
mol = rdchem.Mol(mol)
conf = mol.GetConformer(confId)
CanonicalizeConformer(conf)
box = ComputeConfBox(conf)
sideLen = ( box[1].x-box[0].x + 2*boxMargin, \
box[1].y-box[0].y + 2*boxMargin, \
box[1].z-box[0].z + 2*boxMargin )
shape = rdGeometry.UniformGrid3D(sideLen[0], sideLen[1], sideLen[2], spacing=gridSpacing)
EncodeShape(mol, shape, confId, ignoreHs=False, vdwScale=1.0)
voxelVol = gridSpacing**3
occVect = shape.GetOccupancyVect()
voxels = [1 for x in occVect if x == 3]
vol = voxelVol * len(voxels)
return vol
def GenerateDepictionMatching2DStructure(mol, reference, confId=-1, referencePattern=None,
acceptFailure=False, **kwargs):
""" Generates a depiction for a molecule where a piece of the molecule
is constrained to have the same coordinates as a reference.
This is useful for, for example, generating depictions of SAR data
sets so that the cores of the molecules are all oriented the same
way.
Arguments:
- mol: the molecule to be aligned, this will come back
with a single conformer.
- reference: a molecule with the reference atoms to align to;
this should have a depiction.
- confId: (optional) the id of the reference conformation to use
- referencePattern: (optional) an optional molecule to be used to
generate the atom mapping between the molecule
and the reference.
- acceptFailure: (optional) if True, standard depictions will be generated
for molecules that don't have a substructure match to the
reference; if False, a ValueError will be raised
"""
if reference and referencePattern:
if not reference.GetNumAtoms(onlyExplicit=True) == referencePattern.GetNumAtoms(
onlyExplicit=True):
raise ValueError(
'When a pattern is provided, it must have the same number of atoms as the reference')
referenceMatch = reference.GetSubstructMatch(referencePattern)
if not referenceMatch:
raise ValueError("Reference does not map to itself")
else:
referenceMatch = range(reference.GetNumAtoms(onlyExplicit=True))
if referencePattern:
match = mol.GetSubstructMatch(referencePattern)
else:
match = mol.GetSubstructMatch(reference)
if not match:
if not acceptFailure:
raise ValueError('Substructure match with reference not found.')
else:
coordMap = {}
else:
conf = reference.GetConformer()
coordMap = {}
for i, idx in enumerate(match):
pt3 = conf.GetAtomPosition(referenceMatch[i])
pt2 = rdGeometry.Point2D(pt3.x, pt3.y)
coordMap[idx] = pt2
Compute2DCoords(mol, clearConfs=True, coordMap=coordMap, canonOrient=False)
def GenerateDepictionMatching3DStructure(mol, reference, confId=-1, **kwargs):
""" Generates a depiction for a molecule where a piece of the molecule
is constrained to have coordinates similar to those of a 3D reference
structure.
Arguments:
- mol: the molecule to be aligned, this will come back
with a single conformer.
- reference: a molecule with the reference atoms to align to;
this should have a depiction.
- confId: (optional) the id of the reference conformation to use
"""
nAts = mol.GetNumAtoms()
dm = []
conf = reference.GetConformer(confId)
for i in range(nAts):
pi = conf.GetAtomPosition(i)
#npi.z=0
for j in range(i + 1, nAts):
pj = conf.GetAtomPosition(j)
#pj.z=0
dm.append((pi - pj).Length())
dm = numpy.array(dm)
Compute2DCoordsMimicDistmat(mol, dm, **kwargs)
def GetBestRMS(ref, probe, refConfId=-1, probeConfId=-1, maps=None):
""" Returns the optimal RMS for aligning two molecules, taking
symmetry into account. As a side-effect, the probe molecule is
left in the aligned state.
Arguments:
- ref: the reference molecule
- probe: the molecule to be aligned to the reference
- refConfId: (optional) reference conformation to use
- probeConfId: (optional) probe conformation to use
- maps: (optional) a list of lists of (probeAtomId,refAtomId)
tuples with the atom-atom mappings of the two molecules.
If not provided, these will be generated using a substructure
search.
Note:
This function will attempt to align all permutations of matching atom
orders in both molecules, for some molecules it will lead to 'combinatorial
explosion' especially if hydrogens are present.
Use 'rdkit.Chem.AllChem.AlignMol' to align molecules without changing the
atom order.
"""
if not maps:
matches = ref.GetSubstructMatches(probe, uniquify=False)
if not matches:
raise ValueError('mol %s does not match mol %s' % (ref.GetProp('_Name'),
probe.GetProp('_Name')))
if len(matches) > 1e6:
warnings.warn("{} matches detected for molecule {}, this may lead to a performance slowdown.".
format(len(matches), probe.GetProp('_Name')))
maps = [list(enumerate(match)) for match in matches]
bestRMS = 1000.
for amap in maps:
rms = AlignMol(probe, ref, probeConfId, refConfId, atomMap=amap)
if rms < bestRMS:
bestRMS = rms
bestMap = amap
# finally repeate the best alignment :
if bestMap != amap:
AlignMol(probe, ref, probeConfId, refConfId, atomMap=bestMap)
return bestRMS
def GetConformerRMS(mol, confId1, confId2, atomIds=None, prealigned=False):
""" Returns the RMS between two conformations.
By default, the conformers will be aligned to the first conformer
of the molecule (i.e. the reference) before RMS calculation and,
as a side-effect, will be left in the aligned state.
Arguments:
- mol: the molecule
- confId1: the id of the first conformer
- confId2: the id of the second conformer
- atomIds: (optional) list of atom ids to use a points for
alingment - defaults to all atoms
- prealigned: (optional) by default the conformers are assumed
be unaligned and will therefore be aligned to the
first conformer
"""
# align the conformers if necessary
# Note: the reference conformer is always the first one
if not prealigned:
if atomIds:
AlignMolConformers(mol, confIds=[confId1, confId2], atomIds=atomIds)
else:
AlignMolConformers(mol, confIds=[confId1, confId2])
# calculate the RMS between the two conformations
conf1 = mol.GetConformer(id=confId1)
conf2 = mol.GetConformer(id=confId2)
ssr = 0
for i in range(mol.GetNumAtoms()):
d = conf1.GetAtomPosition(i).Distance(conf2.GetAtomPosition(i))
ssr += d * d
ssr /= mol.GetNumAtoms()
return numpy.sqrt(ssr)
def GetConformerRMSMatrix(mol, atomIds=None, prealigned=False):
""" Returns the RMS matrix of the conformers of a molecule.
As a side-effect, the conformers will be aligned to the first
conformer (i.e. the reference) and will left in the aligned state.
Arguments:
- mol: the molecule
- atomIds: (optional) list of atom ids to use a points for
alingment - defaults to all atoms
- prealigned: (optional) by default the conformers are assumed
be unaligned and will therefore be aligned to the
first conformer
Note that the returned RMS matrix is symmetrically, i.e. it is the
lower half of the matrix, e.g. for 5 conformers:
rmsmatrix = [ a,
b, c,
d, e, f,
g, h, i, j]
This way it can be directly used as distance matrix in e.g. Butina
clustering.
"""
# if necessary, align the conformers
# Note: the reference conformer is always the first one
rmsvals = []
if not prealigned:
if atomIds:
AlignMolConformers(mol, atomIds=atomIds, RMSlist=rmsvals)
else:
AlignMolConformers(mol, RMSlist=rmsvals)
else: # already prealigned
for i in range(1, mol.GetNumConformers()):
rmsvals.append(GetConformerRMS(mol, 0, i, atomIds=atomIds, prealigned=prealigned))
# loop over the conformations (except the reference one)
cmat = []
for i in range(1, mol.GetNumConformers()):
cmat.append(rmsvals[i - 1])
for j in range(1, i):
cmat.append(GetConformerRMS(mol, i, j, atomIds=atomIds, prealigned=True))
return cmat
def EnumerateLibraryFromReaction(reaction, sidechainSets):
""" Returns a generator for the virtual library defined by
a reaction and a sequence of sidechain sets
>>> from rdkit import Chem
>>> from rdkit.Chem import AllChem
>>> s1=[Chem.MolFromSmiles(x) for x in ('NC','NCC')]
>>> s2=[Chem.MolFromSmiles(x) for x in ('OC=O','OC(=O)C')]
>>> rxn = AllChem.ReactionFromSmarts('[O:2]=[C:1][OH].[N:3]>>[O:2]=[C:1][N:3]')
>>> r = AllChem.EnumerateLibraryFromReaction(rxn,[s2,s1])
>>> [Chem.MolToSmiles(x[0]) for x in list(r)]
['CNC=O', 'CCNC=O', 'CNC(C)=O', 'CCNC(C)=O']
Note that this is all done in a lazy manner, so "infinitely" large libraries can
be done without worrying about running out of memory. Your patience will run out first:
Define a set of 10000 amines:
>>> amines = (Chem.MolFromSmiles('N'+'C'*x) for x in range(10000))
... a set of 10000 acids
>>> acids = (Chem.MolFromSmiles('OC(=O)'+'C'*x) for x in range(10000))
... now the virtual library (1e8 compounds in principle):
>>> r = AllChem.EnumerateLibraryFromReaction(rxn,[acids,amines])
... look at the first 4 compounds:
>>> [Chem.MolToSmiles(next(r)[0]) for x in range(4)]
['NC=O', 'CNC=O', 'CCNC=O', 'CCCNC=O']
"""
if len(sidechainSets) != reaction.GetNumReactantTemplates():
raise ValueError('%d sidechains provided, %d required' %
(len(sidechainSets), reaction.GetNumReactantTemplates()))
def _combiEnumerator(items, depth=0):
for item in items[depth]:
if depth + 1 < len(items):
v = _combiEnumerator(items, depth + 1)
for entry in v:
l = [item]
l.extend(entry)
yield l
else:
yield [item]
for chains in _combiEnumerator(sidechainSets):
prodSets = reaction.RunReactants(chains)
for prods in prodSets:
yield prods
def ConstrainedEmbed(mol, core, useTethers=True, coreConfId=-1, randomseed=2342,
getForceField=UFFGetMoleculeForceField, **kwargs):
""" generates an embedding of a molecule where part of the molecule
is constrained to have particular coordinates
Arguments
- mol: the molecule to embed
- core: the molecule to use as a source of constraints
- useTethers: (optional) if True, the final conformation will be
optimized subject to a series of extra forces that pull the
matching atoms to the positions of the core atoms. Otherwise
simple distance constraints based on the core atoms will be
used in the optimization.
- coreConfId: (optional) id of the core conformation to use
- randomSeed: (optional) seed for the random number generator
An example, start by generating a template with a 3D structure:
>>> from rdkit.Chem import AllChem
>>> template = AllChem.MolFromSmiles("c1nn(Cc2ccccc2)cc1")
>>> AllChem.EmbedMolecule(template)
0
>>> AllChem.UFFOptimizeMolecule(template)
0
Here's a molecule:
>>> mol = AllChem.MolFromSmiles("c1nn(Cc2ccccc2)cc1-c3ccccc3")
Now do the constrained embedding
>>> newmol=AllChem.ConstrainedEmbed(mol, template)
Demonstrate that the positions are the same:
>>> newp=newmol.GetConformer().GetAtomPosition(0)
>>> molp=mol.GetConformer().GetAtomPosition(0)
>>> list(newp-molp)==[0.0,0.0,0.0]
True
>>> newp=newmol.GetConformer().GetAtomPosition(1)
>>> molp=mol.GetConformer().GetAtomPosition(1)
>>> list(newp-molp)==[0.0,0.0,0.0]
True
"""
match = mol.GetSubstructMatch(core)
if not match:
raise ValueError("molecule doesn't match the core")
coordMap = {}
coreConf = core.GetConformer(coreConfId)
for i, idxI in enumerate(match):
corePtI = coreConf.GetAtomPosition(i)
coordMap[idxI] = corePtI
ci = EmbedMolecule(mol, coordMap=coordMap, randomSeed=randomseed, **kwargs)
if ci < 0:
raise ValueError('Could not embed molecule.')
algMap = [(j, i) for i, j in enumerate(match)]
if not useTethers:
# clean up the conformation
ff = getForceField(mol, confId=0)
for i, idxI in enumerate(match):
for j in range(i + 1, len(match)):
idxJ = match[j]
d = coordMap[idxI].Distance(coordMap[idxJ])
ff.AddDistanceConstraint(idxI, idxJ, d, d, 100.)
ff.Initialize()
n = 4
more = ff.Minimize()
while more and n:
more = ff.Minimize()
n -= 1
# rotate the embedded conformation onto the core:
rms = AlignMol(mol, core, atomMap=algMap)
else:
# rotate the embedded conformation onto the core:
rms = AlignMol(mol, core, atomMap=algMap)
ff = getForceField(mol, confId=0)
conf = core.GetConformer()
for i in range(core.GetNumAtoms()):
p = conf.GetAtomPosition(i)
pIdx = ff.AddExtraPoint(p.x, p.y, p.z, fixed=True) - 1
ff.AddDistanceConstraint(pIdx, match[i], 0, 0, 100.)
ff.Initialize()
n = 4
more = ff.Minimize(energyTol=1e-4, forceTol=1e-3)
while more and n:
more = ff.Minimize(energyTol=1e-4, forceTol=1e-3)
n -= 1
# realign
rms = AlignMol(mol, core, atomMap=algMap)
mol.SetProp('EmbedRMS', str(rms))
return mol
def AssignBondOrdersFromTemplate(refmol, mol):
    """ assigns bond orders to a molecule based on the
    bond orders in a template molecule

    Arguments
      - refmol: the template molecule
      - mol: the molecule to assign bond orders to

    Returns a copy of ``mol``; when the two molecules do not already match as
    substructures, the bond orders, aromaticity flags, hybridizations,
    explicit-H counts, and formal charges of ``refmol`` are transferred onto
    that copy. Raises ``ValueError`` if no atom mapping can be found.

    An example, start by generating a template from a SMILES
    and read in the PDB structure of the molecule

    >>> from rdkit.Chem import AllChem
    >>> template = AllChem.MolFromSmiles("CN1C(=NC(C1=O)(c2ccccc2)c3ccccc3)N")
    >>> mol = AllChem.MolFromPDBFile(os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', '4DJU_lig.pdb'))
    >>> len([1 for b in template.GetBonds() if b.GetBondTypeAsDouble() == 1.0])
    8
    >>> len([1 for b in mol.GetBonds() if b.GetBondTypeAsDouble() == 1.0])
    22

    Now assign the bond orders based on the template molecule

    >>> newMol = AllChem.AssignBondOrdersFromTemplate(template, mol)
    >>> len([1 for b in newMol.GetBonds() if b.GetBondTypeAsDouble() == 1.0])
    8

    Note that the template molecule should have no explicit hydrogens
    else the algorithm will fail.

    It also works if there are different formal charges (this was github issue 235):

    >>> template=AllChem.MolFromSmiles('CN(C)C(=O)Cc1ccc2c(c1)NC(=O)c3ccc(cc3N2)c4ccc(c(c4)OC)[N+](=O)[O-]')
    >>> mol = AllChem.MolFromMolFile(os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', '4FTR_lig.mol'))
    >>> AllChem.MolToSmiles(mol)
    'COC1CC(C2CCC3C(O)NC4CC(CC(O)N(C)C)CCC4NC3C2)CCC1N(O)O'
    >>> newMol = AllChem.AssignBondOrdersFromTemplate(template, mol)
    >>> AllChem.MolToSmiles(newMol)
    'COc1cc(-c2ccc3c(c2)Nc2ccc(CC(=O)N(C)C)cc2NC3=O)ccc1[N+](=O)[O-]'
    """
    # Work on copies so that neither input molecule is ever mutated.
    refmol2 = rdchem.Mol(refmol)
    mol2 = rdchem.Mol(mol)
    # do the molecules match already?
    matching = mol2.GetSubstructMatch(refmol2)
    if not matching:  # no, they don't match
        # The connectivity-only comparison below requires both working copies
        # to be reduced to plain single-bonded, uncharged graphs first.
        # force every bond of the mol copy to SINGLE / non-aromatic
        for b in mol2.GetBonds():
            if b.GetBondType() != BondType.SINGLE:
                b.SetBondType(BondType.SINGLE)
                b.SetIsAromatic(False)
        # force every bond of the template copy to SINGLE / non-aromatic
        for b in refmol2.GetBonds():
            b.SetBondType(BondType.SINGLE)
            b.SetIsAromatic(False)
        # set atom formal charges to zero on both copies
        for a in refmol2.GetAtoms():
            a.SetFormalCharge(0)
        for a in mol2.GetAtoms():
            a.SetFormalCharge(0)

        # Retry the match on the normalized graphs. uniquify=False keeps
        # symmetry-equivalent matches so the ambiguity can be reported below.
        matching = mol2.GetSubstructMatches(refmol2, uniquify=False)
        # do the molecules match now?
        if matching:
            if len(matching) > 1:
                logger.warning("More than one matching pattern found - picking one")
            # keep only the first atom map: matching[i] is the index in mol2
            # of template atom i
            matching = matching[0]
            # apply matching: copy bond type/aromaticity from the original
            # (unmodified) template onto the corresponding bonds of mol2
            for b in refmol.GetBonds():
                atom1 = matching[b.GetBeginAtomIdx()]
                atom2 = matching[b.GetEndAtomIdx()]
                b2 = mol2.GetBondBetweenAtoms(atom1, atom2)
                b2.SetBondType(b.GetBondType())
                b2.SetIsAromatic(b.GetIsAromatic())
            # apply matching: copy per-atom properties from the template
            for a in refmol.GetAtoms():
                a2 = mol2.GetAtomWithIdx(matching[a.GetIdx()])
                a2.SetHybridization(a.GetHybridization())
                a2.SetIsAromatic(a.GetIsAromatic())
                a2.SetNumExplicitHs(a.GetNumExplicitHs())
                a2.SetFormalCharge(a.GetFormalCharge())
            # re-perceive aromaticity/valence after the edits above
            SanitizeMol(mol2)
            if hasattr(mol2, '__sssAtoms'):
                mol2.__sssAtoms = None  # we don't want all bonds highlighted
        else:
            raise ValueError("No matching found")
    return mol2
#------------------------------------
#
# doctest boilerplate
#
def _test():
import doctest, sys
return doctest.testmod(sys.modules["__main__"])
if __name__ == '__main__':
    import sys

    # Exit status is the number of failed doctests (0 == success).
    sys.exit(_test().failed)
| 1 | 16,000 | Wrapped with list | rdkit-rdkit | cpp |
@@ -0,0 +1,10 @@
+package stats
+
+// DivideFloat64 returns the value of value divided by base.
+// divide by zero case handled before calling.
+func DivideFloat64(value float64, base float64) (float64, bool) {
+ if base == 0 {
+ return 0, false
+ }
+ return (value / base), true
+} | 1 | 1 | 10,982 | Please move this to `pkg/stats/v1alpha1` | openebs-maya | go |
|
@@ -1,12 +1,13 @@
class GithubUser
+ URL_FORMAT = /\A[^\/]+\Z/
include ActiveModel::Model
attr_accessor :url, :bypass_url_validation
attr_reader :repositories, :module_name, :password
alias_method :username, :url
- validates :url, format: { with: /\A[^\/]+\Z/, message: I18n.t('invalid_github_username') }
- validate :username_must_exist
+ validates :url, format: { with: URL_FORMAT, message: I18n.t('invalid_github_username') }
+ validate :username_must_exist, if: -> { url.match(URL_FORMAT) }
def attributes
{ url: username, type: self.class.name } | 1 | class GithubUser
include ActiveModel::Model
attr_accessor :url, :bypass_url_validation
attr_reader :repositories, :module_name, :password
alias_method :username, :url
validates :url, format: { with: /\A[^\/]+\Z/, message: I18n.t('invalid_github_username') }
validate :username_must_exist
def attributes
{ url: username, type: self.class.name }
end
def save!
create_repositories
end
def create_enlistment_for_project(editor_account, project, ignore = nil)
repositories.each do |repository|
repository.create_enlistment_for_project(editor_account, project, ignore)
end
end
def branch_name
:master
end
class << self
def get_compatible_class(_url)
self
end
def find_existing(_repository)
end
def find_existing_repository(url)
GitRepository.find_by(url: url, branch_name: new.branch_name)
end
end
private
def create_repositories
urls = fetch_repository_urls
@repositories ||= urls.map { |url| GitRepository.find_or_create_by(url: url, branch_name: branch_name) }
end
def fetch_repository_urls
page = 0
repository_urls = []
loop do
page += 1
_stdin, json_repository_data = Open3.popen3('curl', github_url(page))
repository_data = JSON.load(json_repository_data)
break unless repository_data.present?
repository_urls.concat repository_data.map { |data| data['git_url'] }
end
repository_urls
end
def github_url(page)
"#{github_username_url}/repos?page=#{page}&per_page=100"
end
def github_username_url
"https://api.github.com/users/#{username}"
end
def username_must_exist
_stdin, stdout = Open3.popen3('curl', github_username_url)
output = JSON.load(stdout)
errors.add(:url, I18n.t('invalid_github_username')) if output.is_a?(Hash) && output['message'] == 'Not Found'
end
end
| 1 | 8,549 | a small nit pick ...this can go into `lib/patterns.rb` ? | blackducksoftware-ohloh-ui | rb |
@@ -27,7 +27,7 @@ import * as fixtures from '../datastore/__fixtures__';
import fetchMock from 'fetch-mock';
// TODO: update the active class.
-const activeClass = 'googlesitekit-cta-link--danger';
+const activeClass = 'mdc-tab--active';
const url = fixtures.pagespeedMobile.loadingExperience.id;
const setupRegistry = ( { dispatch } ) => {
dispatch( STORE_NAME ).receiveGetReport( fixtures.pagespeedMobile, { url, strategy: STRATEGY_MOBILE } ); | 1 | /**
* DashboardPageSpeed component tests.
*
* Site Kit by Google, Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Internal dependencies
*/
import DashboardPageSpeed from './DashboardPageSpeed';
import { fireEvent, render } from '../../../../../tests/js/test-utils';
import { STORE_NAME, STRATEGY_MOBILE, STRATEGY_DESKTOP } from '../datastore/constants';
import { STORE_NAME as CORE_SITE } from '../../../googlesitekit/datastore/site/constants';
import * as fixtures from '../datastore/__fixtures__';
import fetchMock from 'fetch-mock';
// TODO: update the active class.
const activeClass = 'googlesitekit-cta-link--danger';
const url = fixtures.pagespeedMobile.loadingExperience.id;
const setupRegistry = ( { dispatch } ) => {
dispatch( STORE_NAME ).receiveGetReport( fixtures.pagespeedMobile, { url, strategy: STRATEGY_MOBILE } );
dispatch( STORE_NAME ).receiveGetReport( fixtures.pagespeedDesktop, { url, strategy: STRATEGY_DESKTOP } );
dispatch( CORE_SITE ).receiveSiteInfo( { referenceSiteURL: url } );
};
const setupNoReports = ( { dispatch } ) => {
dispatch( CORE_SITE ).receiveSiteInfo( { referenceSiteURL: url } );
};
const setupRegistryNoFieldDataDesktop = ( { dispatch } ) => {
// eslint-disable-next-line no-unused-vars
const { metrics, ...desktopLoadingExperience } = fixtures.pagespeedDesktop.loadingExperience;
dispatch( STORE_NAME ).receiveGetReport( fixtures.pagespeedMobile, { url, strategy: STRATEGY_MOBILE } );
dispatch( STORE_NAME ).receiveGetReport( {
...fixtures.pagespeedDesktop,
loadingExperience: desktopLoadingExperience, // no field data metrics
}, { url, strategy: STRATEGY_DESKTOP } );
dispatch( CORE_SITE ).receiveSiteInfo( { referenceSiteURL: url } );
};
describe( 'DashboardPageSpeed', () => {
afterEach( fetchMock.mockClear );
it( 'renders a progress bar while reports are requested', () => {
fetchMock.get(
/^\/google-site-kit\/v1\/modules\/pagespeed-insights\/data\/pagespeed/,
new Promise( () => {} ), // Don't return a response.
);
const { queryByRole } = render( <DashboardPageSpeed />, { setupRegistry: setupNoReports } );
expect( queryByRole( 'progressbar' ) ).toBeInTheDocument();
} );
it( 'displays field data by default when available in both mobile and desktop reports', () => {
expect( fixtures.pagespeedMobile.loadingExperience ).toHaveProperty( 'metrics' );
expect( fixtures.pagespeedDesktop.loadingExperience ).toHaveProperty( 'metrics' );
const { getByText } = render( <DashboardPageSpeed />, { setupRegistry } );
expect( getByText( /In the Field/i ) ).toHaveClass( activeClass );
} );
it( 'displays lab data by default when field data is not present in both mobile and desktop reports', () => {
const { getByText } = render( <DashboardPageSpeed />, { setupRegistry: setupRegistryNoFieldDataDesktop } );
expect( getByText( /In the Lab/i ) ).toHaveClass( activeClass );
expect( getByText( /In the Field/i ) ).not.toHaveClass( activeClass );
} );
it( 'displays the mobile data by default', () => {
const { getByText } = render( <DashboardPageSpeed />, { setupRegistry } );
expect( getByText( /mobile/i ) ).toHaveClass( activeClass );
} );
it( 'has tabs for toggling the displayed data source', () => {
const { getByText } = render( <DashboardPageSpeed />, { setupRegistry } );
const labDataTabLink = getByText( /In the Lab/i );
expect( labDataTabLink ).not.toHaveClass( activeClass );
fireEvent.click( labDataTabLink );
expect( labDataTabLink ).toHaveClass( activeClass );
expect( getByText( /In the Field/i ) ).not.toHaveClass( activeClass );
} );
it( 'has tabs for toggling the tested device', () => {
const { getByText } = render( <DashboardPageSpeed />, { setupRegistry } );
const desktopToggle = getByText( /desktop/i );
expect( desktopToggle ).not.toHaveClass( activeClass );
fireEvent.click( desktopToggle );
expect( desktopToggle ).toHaveClass( activeClass );
expect( getByText( /mobile/i ) ).not.toHaveClass( activeClass );
} );
it( 'displays a "Field data unavailable" message when field data is not available', () => {
const { getByText, queryByText, registry } = render( <DashboardPageSpeed />, { setupRegistry: setupRegistryNoFieldDataDesktop } );
const { getReport } = registry.select( STORE_NAME );
expect( getReport( url, STRATEGY_MOBILE ).loadingExperience ).toHaveProperty( 'metrics' );
expect( getReport( url, STRATEGY_DESKTOP ).loadingExperience ).not.toHaveProperty( 'metrics' );
// Lab data is shown by default as both reports do not have field data.
expect( getByText( /In the Lab/i ) ).toHaveClass( activeClass );
// Switch to Field data source.
fireEvent.click( getByText( /In the Field/i ) );
expect( getByText( /mobile/i ) ).toHaveClass( activeClass );
// Mobile has field data, so ensure the no data message is not present.
expect( queryByText( /Field data unavailable/i ) ).not.toBeInTheDocument();
// Switch to desktop and expect to see the no data message.
fireEvent.click( getByText( /desktop/i ) );
expect( queryByText( /Field data unavailable/i ) ).toBeInTheDocument();
} );
} );
| 1 | 29,658 | The above TODO is no longer necessary I believe | google-site-kit-wp | js |
@@ -345,7 +345,7 @@ func getPoolLivenessProbe() *corev1.Probe {
probe := &corev1.Probe{
Handler: corev1.Handler{
Exec: &corev1.ExecAction{
- Command: []string{"/bin/sh", "-c", "zfs set io.openebs:livenesstimestamp=\"$(date)\" cstor-$OPENEBS_IO_POOL_NAME"},
+ Command: []string{"/bin/sh", "-c", "zfs set io.openebs:livenesstimestamp=\"$(date +%s)\" cstor-$OPENEBS_IO_POOL_NAME"},
},
},
FailureThreshold: 3, | 1 | /*
Copyright 2019 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
"os"
apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
apiscsp "github.com/openebs/maya/pkg/cstor/poolinstance/v1alpha3"
container "github.com/openebs/maya/pkg/kubernetes/container/v1alpha1"
deploy "github.com/openebs/maya/pkg/kubernetes/deployment/appsv1/v1alpha1"
pts "github.com/openebs/maya/pkg/kubernetes/podtemplatespec/v1alpha1"
volume "github.com/openebs/maya/pkg/kubernetes/volume/v1alpha1"
"github.com/openebs/maya/pkg/version"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// OpenEBSServiceAccount name of the openebs service accout with required
// permissions
const (
OpenEBSServiceAccount = "openebs-maya-operator"
// PoolMgmtContainerName is the name of cstor target container name
PoolMgmtContainerName = "cstor-pool-mgmt"
// PoolContainerName is the name of cstor target container name
PoolContainerName = "cstor-pool"
// PoolExporterContainerName is the name of cstor target container name
PoolExporterContainerName = "maya-exporter"
)
var (
// run container in privileged mode configuration that will be
// applied to a container.
privileged = true
defaultPoolMgmtMounts = []corev1.VolumeMount{
corev1.VolumeMount{
Name: "device",
MountPath: "/dev",
},
corev1.VolumeMount{
Name: "tmp",
MountPath: "/tmp",
},
corev1.VolumeMount{
Name: "udev",
MountPath: "/run/udev",
},
}
// hostpathType represents the hostpath type
hostpathTypeDirectory = corev1.HostPathDirectory
// hostpathType represents the hostpath type
hostpathTypeDirectoryOrCreate = corev1.HostPathDirectoryOrCreate
)
// CreateStoragePool creates the required resource to provision a cStor pool
func (pc *PoolConfig) CreateStoragePool() error {
cspObj, err := pc.AlgorithmConfig.GetCSPSpec()
if err != nil {
return errors.Wrap(err, "failed to get CSP spec")
}
gotCSP, err := pc.createCSP(cspObj)
if err != nil {
return errors.Wrap(err, "failed to create CSP")
}
err = pc.createDeployForCSP(gotCSP)
if err != nil {
return errors.Wrapf(err, "failed to create deployment for CSP {%s}", gotCSP.Name)
}
return nil
}
func (pc *PoolConfig) createCSP(csp *apis.CStorPoolInstance) (*apis.CStorPoolInstance, error) {
gotCSP, err := apiscsp.NewKubeClient().WithNamespace(pc.AlgorithmConfig.Namespace).Create(csp)
return gotCSP, err
}
func (pc *PoolConfig) createPoolDeployment(deployObj *appsv1.Deployment) error {
_, err := deploy.NewKubeClient().WithNamespace(pc.AlgorithmConfig.Namespace).Create(deployObj)
return err
}
// GetPoolDeploySpec returns the pool deployment spec.
func (pc *PoolConfig) GetPoolDeploySpec(csp *apis.CStorPoolInstance) (*appsv1.Deployment, error) {
deployObj, err := deploy.NewBuilder().
WithName(csp.Name).
WithNamespace(csp.Namespace).
WithAnnotationsNew(getDeployAnnotations()).
WithLabelsNew(getDeployLabels(csp)).
WithOwnerReferenceNew(getDeployOwnerReference(csp)).
WithReplicas(getReplicaCount()).
WithStrategyType(appsv1.RecreateDeploymentStrategyType).
WithSelectorMatchLabelsNew(getDeployMatchLabels()).
WithPodTemplateSpecBuilder(
pts.NewBuilder().
WithLabelsNew(getPodLabels(csp)).
WithNodeSelector(csp.Spec.NodeSelector).
WithAnnotationsNew(getPodAnnotations()).
WithServiceAccountName(OpenEBSServiceAccount).
// For CStor-Pool-Mgmt container
WithContainerBuilders(
container.NewBuilder().
WithImage(getPoolMgmtImage()).
WithName(PoolMgmtContainerName).
WithImagePullPolicy(corev1.PullIfNotPresent).
WithPrivilegedSecurityContext(&privileged).
WithEnvsNew(getPoolMgmtEnv(csp)).
WithEnvs(getPoolUIDAsEnv(pc.AlgorithmConfig.CSPC)).
WithResourcesByValue(getAuxResourceRequirement(csp)).
WithVolumeMountsNew(getPoolMgmtMounts()),
// For CStor-Pool container
container.NewBuilder().
WithImage(getPoolImage()).
WithName(PoolContainerName).
WithResources(getResourceRequirementForCStorPool(csp)).
WithImagePullPolicy(corev1.PullIfNotPresent).
WithPrivilegedSecurityContext(&privileged).
WithPortsNew(getContainerPort(12000, 3232, 3233)).
WithLivenessProbe(getPoolLivenessProbe()).
WithEnvsNew(getPoolEnv(csp)).
WithEnvs(getPoolUIDAsEnv(pc.AlgorithmConfig.CSPC)).
WithLifeCycle(getPoolLifeCycle()).
WithVolumeMountsNew(getPoolMounts()),
// For maya exporter
container.NewBuilder().
WithImage(getMayaExporterImage()).
WithName(PoolExporterContainerName).
WithResourcesByValue(getAuxResourceRequirement(csp)).
// TODO : Resource and Limit
WithImagePullPolicy(corev1.PullIfNotPresent).
WithPrivilegedSecurityContext(&privileged).
WithPortsNew(getContainerPort(9500)).
WithCommandNew([]string{"maya-exporter"}).
WithArgumentsNew([]string{"-e=pool"}).
WithVolumeMountsNew(getPoolMounts()),
).
// TODO : Add toleration
WithVolumeBuilders(
volume.NewBuilder().
WithName("device").
WithHostPathAndType(
"/dev",
&hostpathTypeDirectory,
),
volume.NewBuilder().
WithName("udev").
WithHostPathAndType(
"/run/udev",
&hostpathTypeDirectory,
),
volume.NewBuilder().
WithName("tmp").
WithHostPathAndType(
getSparseDirPath()+"/shared-"+csp.Name,
&hostpathTypeDirectoryOrCreate,
),
volume.NewBuilder().
WithName("sparse").
WithHostPathAndType(
getSparseDirPath(),
&hostpathTypeDirectoryOrCreate,
),
),
).
Build()
if err != nil {
return nil, errors.Wrapf(err, "failed to build pool deployment object")
}
return deployObj, nil
}
func getReplicaCount() *int32 {
var count int32 = 1
return &count
}
func getDeployOwnerReference(csp *apis.CStorPoolInstance) []metav1.OwnerReference {
OwnerReference := []metav1.OwnerReference{
*metav1.NewControllerRef(csp, apis.SchemeGroupVersion.WithKind("CStorPoolInstance")),
}
return OwnerReference
}
// TODO: Use builder for labels and annotations
func getDeployLabels(csp *apis.CStorPoolInstance) map[string]string {
return map[string]string{
string(apis.CStorPoolClusterCPK): csp.Labels[string(apis.CStorPoolClusterCPK)],
"app": "cstor-pool",
"openebs.io/cstor-pool-instance": csp.Name,
"openebs.io/version": version.GetVersion(),
}
}
func getDeployAnnotations() map[string]string {
return map[string]string{
"openebs.io/monitoring": "pool_exporter_prometheus",
}
}
func getPodLabels(csp *apis.CStorPoolInstance) map[string]string {
return getDeployLabels(csp)
}
func getPodAnnotations() map[string]string {
return map[string]string{
"openebs.io/monitoring": "pool_exporter_prometheus",
"prometheus.io/path": "/metrics",
"prometheus.io/port": "9500",
"prometheus.io/scrape": "true",
"cluster-autoscaler.kubernetes.io/safe-to-evict": "false",
}
}
func getDeployMatchLabels() map[string]string {
return map[string]string{
"app": "cstor-pool",
}
}
// getVolumeTargetImage returns Volume target image
// retrieves the value of the environment variable named
// by the key.
func getPoolMgmtImage() string {
image, present := os.LookupEnv("OPENEBS_IO_CSPI_MGMT_IMAGE")
if !present {
image = "quay.io/openebs/cspi-mgmt:ci"
}
return image
}
// getVolumeTargetImage returns Volume target image
// retrieves the value of the environment variable named
// by the key.
func getPoolImage() string {
image, present := os.LookupEnv("OPENEBS_IO_CSTOR_POOL_IMAGE")
if !present {
image = "quay.io/openebs/cstor-pool:ci"
}
return image
}
// getVolumeTargetImage returns Volume target image
// retrieves the value of the environment variable named
// by the key.
func getMayaExporterImage() string {
image, present := os.LookupEnv("OPENEBS_IO_CSTOR_POOL_EXPORTER_IMAGE")
if !present {
image = "quay.io/openebs/m-exporter:ci"
}
return image
}
func getContainerPort(port ...int32) []corev1.ContainerPort {
var containerPorts []corev1.ContainerPort
for _, p := range port {
containerPorts = append(containerPorts, corev1.ContainerPort{ContainerPort: p, Protocol: "TCP"})
}
return containerPorts
}
func getPoolMgmtMounts() []corev1.VolumeMount {
return append(
defaultPoolMgmtMounts,
corev1.VolumeMount{
Name: "sparse",
MountPath: getSparseDirPath(),
},
)
}
func getSparseDirPath() string {
dir, present := os.LookupEnv("OPENEBS_IO_CSTOR_POOL_SPARSE_DIR")
if !present {
dir = "/var/openebs/sparse"
}
return dir
}
func getPoolUIDAsEnv(cspc *apis.CStorPoolCluster) []corev1.EnvVar {
var env []corev1.EnvVar
return append(
env,
corev1.EnvVar{
Name: "OPENEBS_IO_POOL_NAME",
Value: string(cspc.GetUID()),
},
)
}
func getPoolMgmtEnv(cspi *apis.CStorPoolInstance) []corev1.EnvVar {
var env []corev1.EnvVar
return append(
env,
corev1.EnvVar{
Name: "OPENEBS_IO_CSPI_ID",
Value: string(cspi.GetUID()),
},
corev1.EnvVar{
Name: "RESYNC_INTERVAL",
// TODO : Add tunable
Value: "30",
},
corev1.EnvVar{
Name: "POD_NAME",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "metadata.name",
},
},
},
corev1.EnvVar{
Name: "NAMESPACE",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "metadata.namespace",
},
},
},
)
}
func getPoolLivenessProbe() *corev1.Probe {
probe := &corev1.Probe{
Handler: corev1.Handler{
Exec: &corev1.ExecAction{
Command: []string{"/bin/sh", "-c", "zfs set io.openebs:livenesstimestamp=\"$(date)\" cstor-$OPENEBS_IO_POOL_NAME"},
},
},
FailureThreshold: 3,
InitialDelaySeconds: 300,
PeriodSeconds: 10,
TimeoutSeconds: 300,
}
return probe
}
func getPoolMounts() []corev1.VolumeMount {
return getPoolMgmtMounts()
}
func getPoolEnv(csp *apis.CStorPoolInstance) []corev1.EnvVar {
var env []corev1.EnvVar
return append(
env,
corev1.EnvVar{
Name: "OPENEBS_IO_CSTOR_ID",
Value: string(csp.GetUID()),
},
)
}
func getPoolLifeCycle() *corev1.Lifecycle {
lc := &corev1.Lifecycle{
PostStart: &corev1.Handler{
Exec: &corev1.ExecAction{
Command: []string{"/bin/sh", "-c", "sleep 2"},
},
},
}
return lc
}
// getResourceRequirementForCStorPool returns resource requirement.
func getResourceRequirementForCStorPool(cspi *apis.CStorPoolInstance) *corev1.ResourceRequirements {
var resourceRequirements *corev1.ResourceRequirements
if cspi.Spec.PoolConfig.Resources == nil {
resourceRequirements = &corev1.ResourceRequirements{}
} else {
resourceRequirements = cspi.Spec.PoolConfig.Resources
}
return resourceRequirements
}
func getAuxResourceRequirement(cspi *apis.CStorPoolInstance) corev1.ResourceRequirements {
return cspi.Spec.AuxResources
}
| 1 | 17,572 | Why is this change required ? | openebs-maya | go |
@@ -0,0 +1,16 @@
+package summary
+
+import (
+ "bytes"
+
+ "github.com/russross/blackfriday"
+)
+
+func Markdown(input []byte, wordcount int) []byte {
+ words := bytes.Fields(blackfriday.Markdown(input, Renderer{}, 0))
+ if wordcount > len(words) {
+ wordcount = len(words)
+ }
+
+ return bytes.Join(words[0:wordcount], []byte{' '})
+} | 1 | 1 | 8,083 | This really is an excerpt. ;-) | caddyserver-caddy | go |
|
@@ -164,7 +164,7 @@ func (m *mock) IsWithinDepth(addr swarm.Address) bool {
if m.isWithinFunc != nil {
return m.isWithinFunc(addr)
}
- return false
+ return true
}
func (m *mock) EachNeighbor(f topology.EachPeerFunc) error { | 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mock
import (
"context"
"sync"
"github.com/ethersphere/bee/pkg/p2p"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/topology"
)
type mock struct {
peers []swarm.Address
depth uint8
closestPeer swarm.Address
closestPeerErr error
peersErr error
addPeersErr error
isWithinFunc func(c swarm.Address) bool
marshalJSONFunc func() ([]byte, error)
mtx sync.Mutex
}
func WithPeers(peers ...swarm.Address) Option {
return optionFunc(func(d *mock) {
d.peers = peers
})
}
func WithAddPeersErr(err error) Option {
return optionFunc(func(d *mock) {
d.addPeersErr = err
})
}
func WithNeighborhoodDepth(dd uint8) Option {
return optionFunc(func(d *mock) {
d.depth = dd
})
}
func WithClosestPeer(addr swarm.Address) Option {
return optionFunc(func(d *mock) {
d.closestPeer = addr
})
}
func WithClosestPeerErr(err error) Option {
return optionFunc(func(d *mock) {
d.closestPeerErr = err
})
}
func WithMarshalJSONFunc(f func() ([]byte, error)) Option {
return optionFunc(func(d *mock) {
d.marshalJSONFunc = f
})
}
func WithIsWithinFunc(f func(swarm.Address) bool) Option {
return optionFunc(func(d *mock) {
d.isWithinFunc = f
})
}
func NewTopologyDriver(opts ...Option) topology.Driver {
d := new(mock)
for _, o := range opts {
o.apply(d)
}
return d
}
func (d *mock) AddPeers(addrs ...swarm.Address) {
d.mtx.Lock()
defer d.mtx.Unlock()
d.peers = append(d.peers, addrs...)
}
func (d *mock) Connected(ctx context.Context, peer p2p.Peer, _ bool) error {
d.AddPeers(peer.Address)
return nil
}
func (d *mock) Disconnected(peer p2p.Peer) {
d.mtx.Lock()
defer d.mtx.Unlock()
for i, addr := range d.peers {
if addr.Equal(peer.Address) {
d.peers = append(d.peers[:i], d.peers[i+1:]...)
break
}
}
}
func (d *mock) Announce(_ context.Context, _ swarm.Address, _ bool) error {
return nil
}
func (d *mock) Peers() []swarm.Address {
return d.peers
}
func (d *mock) ClosestPeer(addr swarm.Address, _ bool, skipPeers ...swarm.Address) (peerAddr swarm.Address, err error) {
if len(skipPeers) == 0 {
if d.closestPeerErr != nil {
return d.closestPeer, d.closestPeerErr
}
if !d.closestPeer.Equal(swarm.ZeroAddress) {
return d.closestPeer, nil
}
}
d.mtx.Lock()
defer d.mtx.Unlock()
if len(d.peers) == 0 {
return peerAddr, topology.ErrNotFound
}
skipPeer := false
for _, p := range d.peers {
for _, a := range skipPeers {
if a.Equal(p) {
skipPeer = true
break
}
}
if skipPeer {
skipPeer = false
continue
}
if peerAddr.IsZero() {
peerAddr = p
}
if cmp, _ := swarm.DistanceCmp(addr.Bytes(), p.Bytes(), peerAddr.Bytes()); cmp == 1 {
peerAddr = p
}
}
if peerAddr.IsZero() {
return peerAddr, topology.ErrNotFound
}
return peerAddr, nil
}
func (d *mock) SubscribePeersChange() (c <-chan struct{}, unsubscribe func()) {
return c, unsubscribe
}
func (m *mock) NeighborhoodDepth() uint8 {
return m.depth
}
func (m *mock) IsWithinDepth(addr swarm.Address) bool {
if m.isWithinFunc != nil {
return m.isWithinFunc(addr)
}
return false
}
func (m *mock) EachNeighbor(f topology.EachPeerFunc) error {
return m.EachPeer(f)
}
func (*mock) EachNeighborRev(topology.EachPeerFunc) error {
panic("not implemented") // TODO: Implement
}
// EachPeer iterates from closest bin to farthest
func (d *mock) EachPeer(f topology.EachPeerFunc) (err error) {
d.mtx.Lock()
defer d.mtx.Unlock()
if d.peersErr != nil {
return d.peersErr
}
for i, p := range d.peers {
_, _, err = f(p, uint8(i))
if err != nil {
return
}
}
return nil
}
// EachPeerRev iterates from farthest bin to closest
func (d *mock) EachPeerRev(f topology.EachPeerFunc) (err error) {
d.mtx.Lock()
defer d.mtx.Unlock()
for i := len(d.peers) - 1; i >= 0; i-- {
_, _, err = f(d.peers[i], uint8(i))
if err != nil {
return
}
}
return nil
}
func (d *mock) Snapshot() *topology.KadParams {
return new(topology.KadParams)
}
func (d *mock) Halt() {}
func (d *mock) Close() error { return nil }
type Option interface {
apply(*mock)
}
type optionFunc func(*mock)
func (f optionFunc) apply(r *mock) { f(r) }
| 1 | 15,102 | @acud The reason why the default is now `true` is because the chunk must be in the neighborhood of the storer peer with this change. The majority of the tests expect a valid receipt so the closest peers in the tests must be in the neighborhood. As of writing this, there is only one test that expects a peer to be out of depth. | ethersphere-bee | go |
@@ -44,6 +44,7 @@ import (
"go.temporal.io/server/common/log/tag"
_ "go.temporal.io/server/common/persistence/sql/sqlplugin/mysql" // needed to load mysql plugin
_ "go.temporal.io/server/common/persistence/sql/sqlplugin/postgresql" // needed to load postgresql plugin
+ _ "go.temporal.io/server/common/persistence/sql/sqlplugin/sqlite" // needed to load sqlite plugin
"go.temporal.io/server/temporal"
)
| 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package main
import (
"fmt"
stdlog "log"
"os"
"path"
"runtime"
"strings"
_ "time/tzdata" // embed tzdata as a fallback
"github.com/urfave/cli/v2"
"go.temporal.io/server/build"
"go.temporal.io/server/common/authorization"
"go.temporal.io/server/common/config"
"go.temporal.io/server/common/dynamicconfig"
"go.temporal.io/server/common/headers"
"go.temporal.io/server/common/log"
"go.temporal.io/server/common/log/tag"
_ "go.temporal.io/server/common/persistence/sql/sqlplugin/mysql" // needed to load mysql plugin
_ "go.temporal.io/server/common/persistence/sql/sqlplugin/postgresql" // needed to load postgresql plugin
"go.temporal.io/server/temporal"
)
// main entry point for the temporal server
func main() {
app := buildCLI()
_ = app.Run(os.Args)
}
// buildCLI is the main entry point for the temporal server. It assembles the
// urfave/cli application: global flags for the execution root, config
// directory, runtime environment and availability zone, plus the "start"
// command, whose action loads configuration, sets up logging, the dynamic
// config client, authorization and the claim mapper, then constructs and
// starts the server.
func buildCLI() *cli.App {
	app := cli.NewApp()
	app.Name = "temporal"
	app.Usage = "Temporal server"
	app.Version = headers.ServerVersion
	app.ArgsUsage = " "
	app.Flags = []cli.Flag{
		&cli.StringFlag{
			Name:    "root",
			Aliases: []string{"r"},
			Value:   ".",
			Usage:   "root directory of execution environment",
			EnvVars: []string{config.EnvKeyRoot},
		},
		&cli.StringFlag{
			Name:    "config",
			Aliases: []string{"c"},
			Value:   "config",
			Usage:   "config dir path relative to root",
			EnvVars: []string{config.EnvKeyConfigDir},
		},
		&cli.StringFlag{
			Name:    "env",
			Aliases: []string{"e"},
			Value:   "development",
			Usage:   "runtime environment",
			EnvVars: []string{config.EnvKeyEnvironment},
		},
		&cli.StringFlag{
			Name:    "zone",
			Aliases: []string{"az"},
			Usage:   "availability zone",
			EnvVars: []string{config.EnvKeyAvailabilityZone, config.EnvKeyAvailabilityZoneTypo},
		},
	}

	app.Commands = []*cli.Command{
		{
			Name:      "start",
			Usage:     "Start Temporal server",
			ArgsUsage: " ",
			Flags: []cli.Flag{
				&cli.StringFlag{
					Name:    "services",
					Aliases: []string{"s"},
					Usage:   "comma separated list of services to start. Deprecated",
					Hidden:  true,
				},
				&cli.StringSliceFlag{
					Name:    "service",
					Aliases: []string{"svc"},
					Value:   cli.NewStringSlice(temporal.Services...),
					Usage:   "service(s) to start",
				},
			},
			Before: func(c *cli.Context) error {
				// "start" takes no positional arguments; reject misuse early.
				if c.Args().Len() > 0 {
					return cli.Exit("ERROR: start command doesn't support arguments. Use --service flag instead.", 1)
				}
				return nil
			},
			Action: func(c *cli.Context) error {
				env := c.String("env")
				zone := c.String("zone")
				configDir := path.Join(c.String("root"), c.String("config"))
				services := c.StringSlice("service")

				// For backward compatibility to support old flag format (i.e. `--services=frontend,history,matching`).
				// (Message typo fixed: "multiply" -> "multiple".)
				if c.IsSet("services") {
					stdlog.Println("WARNING: --services flag is deprecated. Specify multiple --service flags instead.")
					services = strings.Split(c.String("services"), ",")
				}

				cfg, err := config.LoadConfig(env, configDir, zone)
				if err != nil {
					return cli.Exit(fmt.Sprintf("Unable to load configuration: %v.", err), 1)
				}

				logger := log.NewZapLogger(log.BuildZapLogger(cfg.Log))
				logger.Info("Build info",
					tag.Timestamp(build.InfoData.BuildTime()),
					tag.NewStringTag("git-revision", build.InfoData.GitRevision),
					tag.NewStringTag("platform", runtime.GOARCH),
					tag.NewStringTag("go-version", runtime.Version()),
					tag.NewStringTag("server-version", headers.ServerVersion),
				)

				// Dynamic config is optional; fall back to a noop client when
				// it is not configured.
				var dynamicConfigClient dynamicconfig.Client
				if cfg.DynamicConfigClient != nil {
					dynamicConfigClient, err = dynamicconfig.NewFileBasedClient(cfg.DynamicConfigClient, logger, temporal.InterruptCh())
					if err != nil {
						return cli.Exit(fmt.Sprintf("Unable to create dynamic config client. Error: %v", err), 1)
					}
				} else {
					dynamicConfigClient = dynamicconfig.NewNoopClient()
					logger.Info("Dynamic config client is not configured. Using noop client.")
				}

				authorizer, err := authorization.GetAuthorizerFromConfig(
					&cfg.Global.Authorization,
				)
				// BUG FIX: this error was previously dropped -- it was
				// overwritten by the claim-mapper call below without ever
				// being inspected.
				if err != nil {
					return cli.Exit(fmt.Sprintf("Unable to instantiate authorizer: %v.", err), 1)
				}
				claimMapper, err := authorization.GetClaimMapperFromConfig(&cfg.Global.Authorization, logger)
				if err != nil {
					return cli.Exit(fmt.Sprintf("Unable to instantiate claim mapper: %v.", err), 1)
				}

				s := temporal.NewServer(
					temporal.ForServices(services),
					temporal.WithConfig(cfg),
					temporal.WithDynamicConfigClient(dynamicConfigClient),
					temporal.WithLogger(logger),
					temporal.InterruptOn(temporal.InterruptCh()),
					temporal.WithAuthorizer(authorizer),
					temporal.WithClaimMapper(func(cfg *config.Config) authorization.ClaimMapper {
						return claimMapper
					}),
				)

				// Start blocks until the interrupt channel fires or startup fails.
				err = s.Start()
				if err != nil {
					return cli.Exit(fmt.Sprintf("Unable to start server. Error: %v", err), 1)
				}
				return cli.Exit("All services are stopped.", 0)
			},
		},
	}
	return app
}
| 1 | 13,101 | This is what breaks the build. Apparently w/o this line, `sqlite` package is not even compiled. With this line it gets compiled but compilation fails because we don't use CGO for our production Linux builds and > ***Important: because this is a `CGO` enabled package you are required to set the environment variable `CGO_ENABLED=1` and have a `gcc` compile present within your path.*** | temporalio-temporal | go |
@@ -58,7 +58,7 @@ test_get_size()
}
// Check sizes of FP/SIMD regs.
- for (uint i = 0; i < NUM_SIMD_SLOTS; i++) {
+ for (int i = 0; i < proc_num_simd_saved(); i++) {
ASSERT(reg_get_size((reg_id_t)DR_REG_H0 + i) == OPSZ_2);
ASSERT(reg_get_size((reg_id_t)DR_REG_S0 + i) == OPSZ_4);
ASSERT(reg_get_size((reg_id_t)DR_REG_D0 + i) == OPSZ_8); | 1 | /* **********************************************************
* Copyright (c) 2018 Arm Limited. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* This file contains unit tests for the APIs exported from opnd.h. */
#include "configure.h"
#include "dr_api.h"
#include <stdio.h>
/* ASSERT(x): if the condition x evaluates to false, print
 * "ASSERT FAILURE: <file>:<line>: <stringified expr>" to stderr and abort()
 * the process. Hand-rolled rather than <assert.h>'s assert() -- presumably so
 * the checks stay active regardless of NDEBUG; confirm against the project's
 * test conventions. */
#define ASSERT(x) \
    ((void)((!(x)) ? (fprintf(stderr, "ASSERT FAILURE: %s:%d: %s\n", __FILE__, __LINE__, \
                              #x), \
                      abort(), 0) \
                   : 0))
static void
test_get_size()
{
/* Check sizes of special registers. */
ASSERT(reg_get_size(DR_REG_WZR) == OPSZ_4);
ASSERT(reg_get_size(DR_REG_XZR) == OPSZ_8);
ASSERT(reg_get_size(DR_REG_SP) == OPSZ_8);
ASSERT(reg_get_size(DR_REG_XSP) == OPSZ_8);
// Check sizes of GPRs.
for (uint i = 0; i < DR_NUM_GPR_REGS; i++) {
ASSERT(reg_get_size((reg_id_t)DR_REG_W0 + i) == OPSZ_4);
ASSERT(reg_get_size((reg_id_t)DR_REG_X0 + i) == OPSZ_8);
}
// Check sizes of FP/SIMD regs.
for (uint i = 0; i < NUM_SIMD_SLOTS; i++) {
ASSERT(reg_get_size((reg_id_t)DR_REG_H0 + i) == OPSZ_2);
ASSERT(reg_get_size((reg_id_t)DR_REG_S0 + i) == OPSZ_4);
ASSERT(reg_get_size((reg_id_t)DR_REG_D0 + i) == OPSZ_8);
ASSERT(reg_get_size((reg_id_t)DR_REG_Q0 + i) == OPSZ_16);
}
// Check sizes of SVE vector regs.
for (uint i = 0; i < 32; i++) {
ASSERT(reg_get_size((reg_id_t)DR_REG_Z0 + i) == OPSZ_SCALABLE);
}
// Check sizes of SVE predicate regs.
for (uint i = 0; i < 16; i++) {
ASSERT(reg_get_size((reg_id_t)DR_REG_P0 + i) == OPSZ_SCALABLE_PRED);
}
}
/* Runs the opnd unit tests; prints "all done" on success (any failure aborts
 * inside ASSERT before reaching the final print). */
int
main(int argc, char *argv[])
{
    /* Command-line arguments are accepted but unused. */
    (void)argc;
    (void)argv;

    test_get_size();

    printf("all done\n");
    return 0;
}
| 1 | 15,581 | This should be all slots | DynamoRIO-dynamorio | c |
@@ -66,7 +66,7 @@ public final class RegistryUtils {
serviceRegistry.run();
}
- public static void destory() {
+ public static void destroy() {
serviceRegistry.destroy();
}
| 1 | /*
* Copyright 2017 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.servicecomb.serviceregistry;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.List;
import java.util.Map;
import org.apache.http.client.utils.URIBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.config.DynamicPropertyFactory;
import io.servicecomb.config.ConfigUtil;
import io.servicecomb.config.archaius.sources.MicroserviceConfigLoader;
import io.servicecomb.foundation.common.event.EventManager;
import io.servicecomb.foundation.common.net.IpPort;
import io.servicecomb.foundation.common.net.NetUtils;
import io.servicecomb.serviceregistry.api.registry.Microservice;
import io.servicecomb.serviceregistry.api.registry.MicroserviceInstance;
import io.servicecomb.serviceregistry.cache.InstanceCacheManager;
import io.servicecomb.serviceregistry.client.ServiceRegistryClient;
import io.servicecomb.serviceregistry.config.ServiceRegistryConfig;
import io.servicecomb.serviceregistry.definition.MicroserviceDefinition;
import io.servicecomb.serviceregistry.registry.ServiceRegistryFactory;
/**
 * Static facade over the process-wide {@link ServiceRegistry}: lifecycle
 * control (init/run/destroy) plus helpers for resolving the address, host
 * name and port that this microservice publishes to the service center.
 */
public final class RegistryUtils {
  private static final Logger LOGGER = LoggerFactory.getLogger(RegistryUtils.class);

  private static ServiceRegistry serviceRegistry;

  // value is ip or {interface name}
  public static final String PUBLISH_ADDRESS = "cse.service.publishAddress";

  private static final String PUBLISH_PORT = "cse.{transport_name}.publishPort";

  private RegistryUtils() {
  }

  /**
   * Creates the singleton {@link ServiceRegistry} from the microservice
   * configuration and initializes it. Must be called before any other method
   * of this class.
   */
  public static void init() {
    MicroserviceConfigLoader loader = ConfigUtil.getMicroserviceConfigLoader();
    MicroserviceDefinition microserviceDefinition = new MicroserviceDefinition(loader.getConfigModels());
    serviceRegistry =
        ServiceRegistryFactory.getOrCreate(EventManager.eventBus, ServiceRegistryConfig.INSTANCE, microserviceDefinition);
    serviceRegistry.init();
  }

  public static void run() {
    serviceRegistry.run();
  }

  /**
   * Shuts down the registry and releases its resources.
   */
  public static void destroy() {
    serviceRegistry.destroy();
  }

  /**
   * @deprecated misspelled name kept for backward compatibility so existing
   * callers keep compiling; use {@link #destroy()} instead.
   */
  @Deprecated
  public static void destory() {
    destroy();
  }

  public static ServiceRegistry getServiceRegistry() {
    return serviceRegistry;
  }

  public static void setServiceRegistry(ServiceRegistry serviceRegistry) {
    RegistryUtils.serviceRegistry = serviceRegistry;
  }

  public static ServiceRegistryClient getServiceRegistryClient() {
    return serviceRegistry.getServiceRegistryClient();
  }

  public static InstanceCacheManager getInstanceCacheManager() {
    return serviceRegistry.getInstanceCacheManager();
  }

  public static String getAppId() {
    return serviceRegistry.getMicroservice().getAppId();
  }

  public static Microservice getMicroservice() {
    return serviceRegistry.getMicroservice();
  }

  public static MicroserviceInstance getMicroserviceInstance() {
    return serviceRegistry.getMicroserviceInstance();
  }

  /**
   * Resolves the IP address to publish: the configured value of
   * {@link #PUBLISH_ADDRESS}, a "{interfaceName}" placeholder resolved
   * against the local network interfaces, or the local host address when the
   * property is unset.
   */
  public static String getPublishAddress() {
    String publicAddressSetting =
        DynamicPropertyFactory.getInstance().getStringProperty(PUBLISH_ADDRESS, "").get();
    publicAddressSetting = publicAddressSetting.trim();
    if (publicAddressSetting.isEmpty()) {
      return NetUtils.getHostAddress();
    }

    // placeholder is network interface name
    if (publicAddressSetting.startsWith("{") && publicAddressSetting.endsWith("}")) {
      return NetUtils
          .ensureGetInterfaceAddress(publicAddressSetting.substring(1, publicAddressSetting.length() - 1))
          .getHostAddress();
    }

    return publicAddressSetting;
  }

  /**
   * Same resolution rules as {@link #getPublishAddress()}, but yields a host
   * name instead of an IP address.
   */
  public static String getPublishHostName() {
    String publicAddressSetting =
        DynamicPropertyFactory.getInstance().getStringProperty(PUBLISH_ADDRESS, "").get();
    publicAddressSetting = publicAddressSetting.trim();
    if (publicAddressSetting.isEmpty()) {
      return NetUtils.getHostName();
    }

    if (publicAddressSetting.startsWith("{") && publicAddressSetting.endsWith("}")) {
      return NetUtils
          .ensureGetInterfaceAddress(publicAddressSetting.substring(1, publicAddressSetting.length() - 1))
          .getHostName();
    }

    return publicAddressSetting;
  }

  /**
   * For an address configured as 0.0.0.0, query the network interfaces and
   * rewrite the URI to the address actually being listened on.
   *
   * @return the rewritten address, or {@code null} when {@code address} cannot
   *         be parsed (a warning is logged).
   */
  public static String getPublishAddress(String schema, String address) {
    if (address == null) {
      return address;
    }

    try {
      URI originalURI = new URI(schema + "://" + address);
      IpPort ipPort = NetUtils.parseIpPort(originalURI.getAuthority());
      if (ipPort == null) {
        LOGGER.warn("address {} not valid.", address);
        return null;
      }

      IpPort publishIpPort = genPublishIpPort(schema, ipPort);
      URIBuilder builder = new URIBuilder(originalURI);
      return builder.setHost(publishIpPort.getHostOrIp()).setPort(publishIpPort.getPort()).build().toString();
    } catch (URISyntaxException e) {
      // Pass the exception to the logger so the root cause is not lost.
      LOGGER.warn("address {} not valid.", address, e);
      return null;
    }
  }

  /**
   * Computes the host/port pair to publish for the given transport, applying
   * the {@link #PUBLISH_ADDRESS} setting (including the "{interfaceName}"
   * placeholder) and the per-transport publish-port override.
   */
  private static IpPort genPublishIpPort(String schema, IpPort ipPort) {
    String publicAddressSetting = DynamicPropertyFactory.getInstance()
        .getStringProperty(PUBLISH_ADDRESS, "")
        .get();
    publicAddressSetting = publicAddressSetting.trim();

    if (publicAddressSetting.isEmpty()) {
      InetSocketAddress socketAddress = ipPort.getSocketAddress();
      if (socketAddress.getAddress().isAnyLocalAddress()) {
        // Listening on a wildcard address: pick a concrete host address,
        // which may not be the one the operator intended -- hence the warning.
        String host = NetUtils.getHostAddress();
        LOGGER.warn("address {}, auto select a host address to publish {}:{}, maybe not the correct one",
            socketAddress,
            host,
            socketAddress.getPort());
        return new IpPort(host, ipPort.getPort());
      }

      return ipPort;
    }

    if (publicAddressSetting.startsWith("{") && publicAddressSetting.endsWith("}")) {
      publicAddressSetting = NetUtils
          .ensureGetInterfaceAddress(
              publicAddressSetting.substring(1, publicAddressSetting.length() - 1))
          .getHostAddress();
    }

    String publishPortKey = PUBLISH_PORT.replace("{transport_name}", schema);
    int publishPortSetting = DynamicPropertyFactory.getInstance()
        .getIntProperty(publishPortKey, 0)
        .get();
    int publishPort = publishPortSetting == 0 ? ipPort.getPort() : publishPortSetting;
    return new IpPort(publicAddressSetting, publishPort);
  }

  public static List<MicroserviceInstance> findServiceInstance(String appId, String serviceName,
      String versionRule) {
    return serviceRegistry.findServiceInstance(appId, serviceName, versionRule);
  }

  // update microservice instance properties
  public static boolean updateInstanceProperties(Map<String, String> instanceProperties) {
    return serviceRegistry.updateInstanceProperties(instanceProperties);
  }

  public static Microservice getMicroservice(String microserviceId) {
    return serviceRegistry.getRemoteMicroservice(microserviceId);
  }
}
| 1 | 7,950 | Need to add deprecated annotation and call the new fixed method to avoid the API broken issue. | apache-servicecomb-java-chassis | java |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.